repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
Just-D/chromium-1 | tools/telemetry/third_party/gsutilz/third_party/protorpc/gen_protorpc.py | 44 | 9886 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command line tool for generating ProtoRPC definitions from descriptors."""
import errno
import logging
import optparse
import os
import sys
from protorpc import descriptor
from protorpc import generate_python
from protorpc import protobuf
from protorpc import registry
from protorpc import transport
from protorpc import util
EXCLUDED_PACKAGES = frozenset(['protorpc.registry',
'protorpc.messages',
'protorpc.descriptor',
'protorpc.message_types',
])
commands = {}
def usage():
"""Print usage help and exit with an error code."""
parser.print_help()
sys.exit(2)
def fatal_error(message):
"""Print fatal error messages exit with an error code.
Args:
message: Message to print to stderr before exit.
"""
sys.stderr.write(message)
sys.exit(1)
def open_input_file(filename):
"""Open file for reading.
Args:
filename: Name of input file to open or None to open stdin.
Returns:
Opened file if string provided, stdin if filename is None.
"""
# TODO(rafek): Detect missing or invalid files, generating user friendly
# error messages.
if filename is None:
return sys.stdin
else:
try:
return open(filename, 'rb')
except IOError, err:
fatal_error(str(err))
@util.positional(1)
def generate_file_descriptor(dest_dir, file_descriptor, force_overwrite):
"""Generate a single file descriptor to destination directory.
Will generate a single Python file from a file descriptor under dest_dir.
The sub-directory where the file is generated is determined by the package
name of descriptor.
Descriptors without package names will not be generated.
Descriptors that are part of the ProtoRPC distribution will not be generated.
Args:
dest_dir: Directory under which to generate files.
file_descriptor: FileDescriptor instance to generate source code from.
force_overwrite: If True, existing files will be overwritten.
"""
package = file_descriptor.package
if not package:
# TODO(rafek): Option to cause an error on this condition.
logging.warn('Will not generate descriptor without package name')
return
if package in EXCLUDED_PACKAGES:
logging.warn('Will not generate main ProtoRPC class %s' % package)
return
package_path = package.split('.')
directory = package_path[:-1]
package_file_name = package_path[-1]
directory_name = os.path.join(dest_dir, *directory)
output_file_name = os.path.join(directory_name,
'%s.py' % (package_file_name,))
try:
os.makedirs(directory_name)
except OSError, err:
if err.errno != errno.EEXIST:
raise
if not force_overwrite and os.path.exists(output_file_name):
logging.warn('Not overwriting %s with package %s',
output_file_name, package)
return
output_file = open(output_file_name, 'w')
logging.info('Writing package %s to %s',
file_descriptor.package, output_file_name)
generate_python.format_python_file(file_descriptor, output_file)
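# Illustrative note (example values, not from the original tool): a descriptor
# whose package is 'my_app.services.users', generated with dest_dir='/tmp/out',
# would be written to /tmp/out/my_app/services/users.py -- the package path
# becomes the directory tree and the last component names the module.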
@util.positional(1)
def command(name, required=(), optional=()):
"""Decorator used for declaring commands used on command line.
Each command of this tool can have any number of sequential required
parameters and optional parameters. The required and optional parameters
will be displayed in the command usage. Arguments passed in to the command
are checked to ensure they have at least the required parameters and not
too many parameters beyond the optional ones. When there are too few
or too many parameters, the usage message is printed and the program exits
with an error code.
Functions decorated thus are added to commands by their name.
Resulting decorated functions will have required and optional attributes
assigned to them so that they appear in the usage message.
Args:
name: Name of command that will follow the program name on the command line.
required: List of required parameter names as displayed in the usage
message.
optional: List of optional parameter names as displayed in the usage
message.
"""
def check_params_decorator(function):
def check_params_wrapper(options, *args):
if not (len(required) <= len(args) <= len(required) + len(optional)):
sys.stderr.write("Incorrect usage for command '%s'\n\n" % name)
usage()
function(options, *args)
check_params_wrapper.required = required
check_params_wrapper.optional = optional
commands[name] = check_params_wrapper
return check_params_wrapper
return check_params_decorator
@command('file', optional=['input-filename', 'output-filename'])
def file_command(options, input_filename=None, output_filename=None):
"""Generate a single descriptor file to Python.
Args:
options: Parsed command line options.
input_filename: File to read protobuf FileDescriptor from. If None
will read from stdin.
output_filename: File to write Python source code to. If None will
generate to stdout.
"""
with open_input_file(input_filename) as input_file:
descriptor_content = input_file.read()
if output_filename:
output_file = open(output_filename, 'w')
else:
output_file = sys.stdout
file_descriptor = protobuf.decode_message(descriptor.FileDescriptor,
descriptor_content)
generate_python.format_python_file(file_descriptor, output_file)
@command('fileset', optional=['filename'])
def fileset_command(options, input_filename=None):
"""Generate source directory structure from FileSet.
Args:
options: Parsed command line options.
input_filename: File to read protobuf FileSet from. If None will read from
stdin.
"""
with open_input_file(input_filename) as input_file:
descriptor_content = input_file.read()
dest_dir = os.path.expanduser(options.dest_dir)
if not os.path.isdir(dest_dir) and os.path.exists(dest_dir):
fatal_error("Destination '%s' is not a directory" % dest_dir)
file_set = protobuf.decode_message(descriptor.FileSet,
descriptor_content)
for file_descriptor in file_set.files:
generate_file_descriptor(dest_dir, file_descriptor=file_descriptor,
force_overwrite=options.force)
@command('registry',
required=['host'],
optional=['service-name', 'registry-path'])
def registry_command(options,
host,
service_name=None,
registry_path='/protorpc'):
"""Generate source directory structure from remote registry service.
Args:
options: Parsed command line options.
host: Web service host where registry service is located. May include
port.
service_name: Name of specific service to read. Will generate only Python
files that service is dependent on. If None, will generate source code
for all services known by the registry.
registry_path: Path to find registry if not the default 'protorpc'.
"""
dest_dir = os.path.expanduser(options.dest_dir)
url = 'http://%s%s' % (host, registry_path)
reg = registry.RegistryService.Stub(transport.HttpTransport(url))
if service_name is None:
service_names = [service.name for service in reg.services().services]
else:
service_names = [service_name]
file_set = reg.get_file_set(names=service_names).file_set
for file_descriptor in file_set.files:
generate_file_descriptor(dest_dir, file_descriptor=file_descriptor,
force_overwrite=options.force)
def make_opt_parser():
"""Create options parser with automatically generated command help.
Will iterate over all functions in commands and generate an appropriate
usage message for them with all their required and optional parameters.
"""
command_descriptions = []
for name in sorted(commands.iterkeys()):
command = commands[name]
params = ' '.join(['<%s>' % param for param in command.required] +
['[<%s>]' % param for param in command.optional])
command_descriptions.append('%%prog [options] %s %s' % (name, params))
command_usage = 'usage: %s\n' % '\n '.join(command_descriptions)
parser = optparse.OptionParser(usage=command_usage)
parser.add_option('-d', '--dest_dir',
dest='dest_dir',
default=os.getcwd(),
help='Write generated files to DIR',
metavar='DIR')
parser.add_option('-f', '--force',
action='store_true',
dest='force',
default=False,
help='Force overwrite of existing files')
return parser
parser = make_opt_parser()
def main():
# TODO(rafek): Customize verbosity.
logging.basicConfig(level=logging.INFO)
options, positional = parser.parse_args()
if not positional:
usage()
command_name = positional[0]
command = commands.get(command_name)
if not command:
sys.stderr.write("Unknown command '%s'\n\n" % command_name)
usage()
parameters = positional[1:]
command(options, *parameters)
if __name__ == '__main__':
main()
| bsd-3-clause |
gardner/urllib3 | test/test_filepost.py | 27 | 4190 | import unittest
from urllib3.filepost import encode_multipart_formdata, iter_fields
from urllib3.fields import RequestField
from urllib3.packages.six import b, u
BOUNDARY = '!! test boundary !!'
class TestIterfields(unittest.TestCase):
def test_dict(self):
for fieldname, value in iter_fields(dict(a='b')):
self.assertEqual((fieldname, value), ('a', 'b'))
self.assertEqual(
list(sorted(iter_fields(dict(a='b', c='d')))),
[('a', 'b'), ('c', 'd')])
def test_tuple_list(self):
for fieldname, value in iter_fields([('a', 'b')]):
self.assertEqual((fieldname, value), ('a', 'b'))
self.assertEqual(
list(iter_fields([('a', 'b'), ('c', 'd')])),
[('a', 'b'), ('c', 'd')])
class TestMultipartEncoding(unittest.TestCase):
def test_input_datastructures(self):
fieldsets = [
dict(k='v', k2='v2'),
[('k', 'v'), ('k2', 'v2')],
]
for fields in fieldsets:
encoded, _ = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded.count(b(BOUNDARY)), 3)
def test_field_encoding(self):
fieldsets = [
[('k', 'v'), ('k2', 'v2')],
[('k', b'v'), (u('k2'), b'v2')],
[('k', b'v'), (u('k2'), 'v2')],
]
for fields in fieldsets:
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k2"\r\n'
b'\r\n'
b'v2\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
, fields)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_filename(self):
fields = [('k', ('somename', b'v'))]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"; filename="somename"\r\n'
b'Content-Type: application/octet-stream\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_textplain(self):
fields = [('k', ('somefile.txt', b'v'))]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"; filename="somefile.txt"\r\n'
b'Content-Type: text/plain\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_explicit(self):
fields = [('k', ('somefile.txt', b'v', 'image/jpeg'))]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"; filename="somefile.txt"\r\n'
b'Content-Type: image/jpeg\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_request_fields(self):
fields = [RequestField('k', b'v', filename='somefile.txt', headers={'Content-Type': 'image/jpeg'})]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Type: image/jpeg\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
| mit |
GladeRom/android_external_chromium_org | third_party/jinja2/nodes.py | 623 | 28875 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import next, izip, with_metaclass, text_type, \
method_type, function_type
#: the types we support for context functions
_context_function_types = (function_type, method_type)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will generate all nodes with a 'load' context, as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro that this node holds as the `caller` argument.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
representation (objects where ``eval(repr(x)) == x`` is true) can be stored.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` have to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here is an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| bsd-3-clause |
stefan-caraiman/python-lab | python/solutii/micu_matei/sync/sync.py | 7 | 6480 | #!/usr/bin/env python
"""
Synchronize two directories.
"""
from __future__ import print_function
import os
import argparse
import shutil
from functii_auxiliare import get_hash
from functii_auxiliare import get_last_edit
from functii_auxiliare import write_sync_file
from functii_auxiliare import read_sync_file
from functii_auxiliare import make_dirs
from functii_auxiliare import copy_r
from functii_auxiliare import get_same_file
# standard keys in dict
FULL_PATH = "fullPath"
MD5 = "md5"
BASE_DIR = "baseDir"
LAST_EDIT = "lastEdit"
IS_FILE = "isFile"
def parse_directory(base_dir, path, prefix):
""" returneaza un dict in care cheile sunt pathuri relative la
<path>-ul primit ca parametru, fiecare valoare este alt dict
cu mai multi parametri, cum ar fi :
- full_path
- md5 ( daca este fisier )
- base_dir( directorul in care sincronizam )
- last_modified_date ( ultima data cand a fost modifica
daca e fisier )
- is_file ( True / False )
"""
if os.path.exists(path) and os.path.isdir(path):
info = {}
for item in os.listdir(path):
full_path_info = os.path.join(path, item)
relative_path_info = os.path.join(prefix, item)
if os.path.isfile(full_path_info):
info[relative_path_info] = {
FULL_PATH: full_path_info,
MD5: get_hash(full_path_info),
BASE_DIR: base_dir,
LAST_EDIT: get_last_edit(full_path_info),
IS_FILE: True
}
elif os.path.isdir(full_path_info):
info[relative_path_info] = {
FULL_PATH: full_path_info,
MD5: get_hash(full_path_info),
BASE_DIR: base_dir,
LAST_EDIT: get_last_edit(full_path_info),
IS_FILE: False
}
info_sub = parse_directory(base_dir,
full_path_info, relative_path_info)
info.update(info_sub)
return info
else:
return {}
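# Illustrative sketch (hypothetical tree, not from the original code): for a
# directory containing 'a.txt' and 'docs/b.txt', parse_directory(base, base, "")
# would return roughly:
#
#   {'a.txt':      {FULL_PATH: base + '/a.txt', MD5: '...', BASE_DIR: base,
#                   LAST_EDIT: 1525000000.0, IS_FILE: True},
#    'docs':       {FULL_PATH: base + '/docs', IS_FILE: False, ...},
#    'docs/b.txt': {FULL_PATH: base + '/docs/b.txt', IS_FILE: True, ...}}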
def sync_new_files(info_a, path_a, info_b, path_b):
""" Sincronizeaza fisierele noi din fisierul A in fisierul B,
folosind informatiile fin info_a si info_b ."""
for item in info_a:
detalii_item = info_a[item]
full_path_item_in_a = os.path.join(path_a, item)
full_path_item_in_b = os.path.join(path_b, item)
if item not in info_b:
if detalii_item[IS_FILE]:
copy_r(full_path_item_in_a, full_path_item_in_b)
else:
make_dirs(full_path_item_in_b)
def sync_deleted_files(info_a, info_b, path_b):
""" Sincronizam fisierele deletate din fisierul A in fisierul B,
folosind informatiile din info_a, info_b si fisierele de
sincronizare daca acestea exista
Eliminam fisierele din B care au fost eliminate in A, daca acestea
existau deja in B inainte """
sync_b = read_sync_file(path_b)
if not sync_b:
return
for item in info_b:
if (item not in info_a) and (item in sync_b):
detalii_item = info_b[item]
if detalii_item[IS_FILE]:
os.remove(detalii_item[FULL_PATH])
else:
shutil.rmtree(detalii_item[FULL_PATH])
def sync_moved_files(info_a, info_b, path_b):
""" Verifica daca un fisier a fost mutat """
for item in info_a:
if info_a[item][IS_FILE]:
if item not in info_b:
old_file = get_same_file(info_a[item], info_b)
if old_file:
old_file = os.path.join(path_b, old_file)
new_path = os.path.join(path_b, item)
shutil.move(old_file, new_path)
def sync_modified_files(info_a, info_b):
""" syncronizam fisierele modificate din A in B"""
for item in info_a:
if item in info_b:
file_a = info_a[item]
file_b = info_b[item]
if file_a[MD5] != file_b[MD5]:
if file_a[LAST_EDIT] > file_b[LAST_EDIT]:
os.remove(file_b[FULL_PATH])
shutil.copy(file_a[FULL_PATH], file_b[FULL_PATH])
else:
os.remove(file_a[FULL_PATH])
shutil.copy(file_b[FULL_PATH], file_a[FULL_PATH])
def parse():
""" parseaza argumentele primite ca parametri """
args = argparse.ArgumentParser()
args.add_argument('firstDir', type=str, help="The first directory")
args.add_argument('secondDir', type=str, help="The second directory")
args = args.parse_args()
return args
def sync(path_a, path_b):
""" syncronizeaza fisierele din cele doua directoare """
# modified files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_modified_files(info_a, info_b)
# sync_modified_files(info_b, info_a)
# moved files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_moved_files(info_a, info_b, path_b)
sync_moved_files(info_b, info_a, path_a)
# delete files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_deleted_files(info_a, info_b, path_b)
sync_deleted_files(info_b, info_a, path_a)
# new files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_new_files(info_a, path_a, info_b, path_b)
sync_new_files(info_b, path_b, info_a, path_a)
# rewrite the sync files with the latest values
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
write_sync_file(path_a, info_a)
write_sync_file(path_b, info_b)
def check_path(path):
""" verifica daca path exista si este valida"""
if not os.path.exists(path):
print("EROARE: ", path, " trebuie sa existe")
return False
elif not os.path.isdir(path):
print("EROARE: ", path, " trebuie sa existe")
return False
return True
def main():
""" syncronizeaza pentru todeauna """
args = parse()
path_a = os.path.abspath(args.firstDir)
path_b = os.path.abspath(args.secondDir)
check_path(path_a)
check_path(path_b)
while True:
sync(path_a, path_b)
if __name__ == "__main__":
main()
| mit |
dpac-vlsi/SynchroTrace | tests/quick/se/20.eio-short/test.py | 19 | 1766 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
root.system.cpu.workload = EioProcess(file = binpath('anagram',
'anagram-vshort.eio.gz'))
root.system.cpu.max_insts_any_thread = 500000
| bsd-3-clause |
SymbiFlow/edalize | edalize/xsim.py | 1 | 5680 | # Copyright edalize contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os
import logging
from collections import OrderedDict
from edalize.edatool import Edatool
logger = logging.getLogger(__name__)
class Xsim(Edatool):
argtypes = ['plusarg', 'vlogdefine', 'vlogparam', 'generic']
MAKEFILE_TEMPLATE="""#Auto generated by Edalize
include config.mk
all: xsim.dir/$(TARGET)/xsimk
xsim.dir/$(TARGET)/xsimk:
xelab $(TOPLEVEL) -prj $(TARGET).prj -snapshot $(TARGET) $(VLOG_DEFINES) $(VLOG_INCLUDES) $(GEN_PARAMS) $(XELAB_OPTIONS)
run: xsim.dir/$(TARGET)/xsimk
xsim -R $(XSIM_OPTIONS) $(TARGET) $(EXTRA_OPTIONS)
run-gui: xsim.dir/$(TARGET)/xsimk
xsim --gui $(XSIM_OPTIONS) $(TARGET) $(EXTRA_OPTIONS)
"""
CONFIG_MK_TEMPLATE = """#Auto generated by Edalize
TARGET = {target}
TOPLEVEL = {toplevel}
VLOG_DEFINES = {vlog_defines}
VLOG_INCLUDES = {vlog_includes}
GEN_PARAMS = {gen_params}
XELAB_OPTIONS = {xelab_options}
XSIM_OPTIONS = {xsim_options}
"""
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
return {'description' : "XSim simulator from the Xilinx Vivado suite",
'members' : [
{'name' : 'compilation_mode',
'type' : 'String',
'desc' : 'Common or separate compilation, sep - for separate compilation, common - for common compilation'}],
'lists' : [
{'name' : 'xelab_options',
'type' : 'String',
'desc' : 'Additional options for compilation with xelab'},
{'name' : 'xsim_options',
'type' : 'String',
'desc' : 'Additional run options for XSim'},
]}
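# Illustrative sketch (assumed EDAM snippet, not part of this file): the options
# documented above are normally supplied through the tool_options section of
# the EDAM description, e.g.
#
#   'tool_options': {'xsim': {'compilation_mode': 'common',
#                             'xelab_options': ['-debug', 'typical'],
#                             'xsim_options': ['-wdb', 'run.wdb']}}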
def configure_main(self):
self._write_config_files()
#Check if any VPI modules are present and display warning
if len(self.vpi_modules) > 0:
modules = [m['name'] for m in self.vpi_modules]
logger.error('VPI modules not supported by Xsim: %s' % ', '.join(modules))
def _write_config_files(self):
mfc = self.tool_options.get('compilation_mode') == 'common'
with open(os.path.join(self.work_root, self.name+'.prj'),'w') as f:
mfcu = []
(src_files, self.incdirs) = self._get_fileset_files()
for src_file in src_files:
cmd = ""
if src_file.file_type.startswith("verilogSource"):
cmd = 'verilog'
elif src_file.file_type == 'vhdlSource-2008':
cmd = 'vhdl2008'
elif src_file.file_type.startswith("vhdlSource"):
cmd = 'vhdl'
elif src_file.file_type.startswith("systemVerilogSource"):
if mfc:
mfcu.append(src_file.name)
else:
cmd = 'sv'
elif src_file.file_type in ["user"]:
pass
else:
_s = "{} has unknown file type '{}'"
logger.warning(_s.format(src_file.name, src_file.file_type))
if cmd:
if src_file.logical_name:
lib = src_file.logical_name
else:
lib = 'work'
f.write('{} {} {}\n'.format(cmd, lib, src_file.name))
if mfc:
f.write('sv work ' + ' '.join(mfcu))
with open(os.path.join(self.work_root, 'config.mk'), 'w') as f:
vlog_defines = ' '.join(['--define {}={}'.format(k,self._param_value_str(v)) for k,v, in self.vlogdefine.items()])
vlog_includes = ' '.join(['-i '+k for k in self.incdirs])
# Both parameters and generics use the same --generic_top argument
# so warn if there are overlapping values
common_vals = set(self.vlogparam).intersection(set(self.generic))
if common_vals != set():
_s = "Common values for vlogparam and generic: {}"
logger.warning(_s.format(common_vals))
gen_param = OrderedDict(self.vlogparam)
gen_param.update(self.generic)
gen_param_args = " ".join(
[
"--generic_top {}={}".format(k, self._param_value_str(v))
for k, v in gen_param.items()
]
)
xelab_options = ' '.join(self.tool_options.get('xelab_options', []))
xsim_options = ' '.join(self.tool_options.get('xsim_options' , []))
f.write(self.CONFIG_MK_TEMPLATE.format(target=self.name,
toplevel=self.toplevel,
vlog_defines = vlog_defines,
vlog_includes = vlog_includes,
gen_params = gen_param_args,
xelab_options = xelab_options,
xsim_options = xsim_options))
with open(os.path.join(self.work_root, 'Makefile'), 'w') as f:
f.write(self.MAKEFILE_TEMPLATE)
def run_main(self):
args = ['run']
# Plusargs
if self.plusarg:
_s = '--testplusarg {}={}'
args.append('EXTRA_OPTIONS='+' '.join([_s.format(k, v) for k,v in self.plusarg.items()]))
self._run_tool('make', args)
| bsd-2-clause |
shrids/kubernetes | build/json-extractor.py | 413 | 2111 | #!/usr/bin/env python
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a very simple utility that reads a JSON document from stdin, parses it
# and returns the specified value. The value is described using a simple dot
# notation. If any errors are encountered along the way, an error is output and
# a failure value is returned.
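# Example invocation (illustrative): given '{"items": [{"name": "pod-a"}]}' on
# stdin, the query 'items.0.name' walks the dict, indexes the list and prints
# 'pod-a':
#
#   echo '{"items": [{"name": "pod-a"}]}' | ./json-extractor.py items.0.name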
from __future__ import print_function
import json
import sys
def PrintError(*err):
print(*err, file=sys.stderr)
def main():
try:
obj = json.load(sys.stdin)
except Exception, e:
PrintError("Error loading JSON: {0}".format(str(e)))
if len(sys.argv) == 1:
# if we don't have a query string, return success
return 0
elif len(sys.argv) > 2:
PrintError("Usage: {0} <json query>".format(sys.args[0]))
return 1
query_list = sys.argv[1].split('.')
for q in query_list:
if isinstance(obj, dict):
if q not in obj:
PrintError("Couldn't find '{0}' in dict".format(q))
return 1
obj = obj[q]
elif isinstance(obj, list):
try:
index = int(q)
except:
PrintError("Can't use '{0}' to index into array".format(q))
return 1
if index >= len(obj):
PrintError("Index ({0}) is greater than length of list ({1})".format(q, len(obj)))
return 1
obj = obj[index]
else:
PrintError("Trying to query non-queryable object: {0}".format(q))
return 1
if isinstance(obj, str):
print(obj)
else:
print(json.dumps(obj, indent=2))
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
piagarwal11/GDriveLinuxClient | src/watchdog-0.8.2/src/watchdog/tricks/__init__.py | 11 | 5177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import subprocess
import time
from watchdog.utils import echo, has_attribute
from watchdog.events import PatternMatchingEventHandler
class Trick(PatternMatchingEventHandler):
"""Your tricks should subclass this class."""
@classmethod
def generate_yaml(cls):
context = dict(module_name=cls.__module__,
klass_name=cls.__name__)
template_yaml = """- %(module_name)s.%(klass_name)s:
args:
- argument1
- argument2
kwargs:
patterns:
- "*.py"
- "*.js"
ignore_patterns:
- "version.py"
ignore_directories: false
"""
return template_yaml % context
class LoggerTrick(Trick):
"""A simple trick that does only logs events."""
def on_any_event(self, event):
pass
@echo.echo
def on_modified(self, event):
pass
@echo.echo
def on_deleted(self, event):
pass
@echo.echo
def on_created(self, event):
pass
@echo.echo
def on_moved(self, event):
pass
class ShellCommandTrick(Trick):
"""Executes shell commands in response to matched events."""
def __init__(self, shell_command=None, patterns=None, ignore_patterns=None,
ignore_directories=False, wait_for_process=False,
drop_during_process=False):
super(ShellCommandTrick, self).__init__(patterns, ignore_patterns,
ignore_directories)
self.shell_command = shell_command
self.wait_for_process = wait_for_process
self.drop_during_process = drop_during_process
self.process = None
def on_any_event(self, event):
from string import Template
if self.drop_during_process and self.process and self.process.poll() is None:
return
if event.is_directory:
object_type = 'directory'
else:
object_type = 'file'
context = {
'watch_src_path': event.src_path,
'watch_dest_path': '',
'watch_event_type': event.event_type,
'watch_object': object_type,
}
if self.shell_command is None:
if has_attribute(event, 'dest_path'):
context.update({'dest_path': event.dest_path})
command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
else:
command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
else:
if has_attribute(event, 'dest_path'):
context.update({'watch_dest_path': event.dest_path})
command = self.shell_command
command = Template(command).safe_substitute(**context)
self.process = subprocess.Popen(command, shell=True)
if self.wait_for_process:
self.process.wait()
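# Illustrative note (hypothetical values): with
#   shell_command='echo "${watch_event_type}: ${watch_src_path}"'
# a modified file /tmp/a.txt makes on_any_event run
#   echo "modified: /tmp/a.txt"
# via the Template substitution above.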
class AutoRestartTrick(Trick):
"""Starts a long-running subprocess and restarts it on matched events.
The command parameter is a list of command arguments, such as
['bin/myserver', '-c', 'etc/myconfig.ini'].
Call start() after creating the Trick. Call stop() when stopping
the process.
"""
def __init__(self, command, patterns=None, ignore_patterns=None,
ignore_directories=False, stop_signal=signal.SIGINT,
kill_after=10):
super(AutoRestartTrick, self).__init__(
patterns, ignore_patterns, ignore_directories)
self.command = ['setsid'] + command
self.stop_signal = stop_signal
self.kill_after = kill_after
self.process = None
def start(self):
self.process = subprocess.Popen(self.command)
def stop(self):
if self.process is None:
return
try:
os.killpg(os.getpgid(self.process.pid), self.stop_signal)
except OSError:
# Process is already gone
pass
else:
kill_time = time.time() + self.kill_after
while time.time() < kill_time:
if self.process.poll() is not None:
break
time.sleep(0.25)
else:
try:
os.killpg(os.getpgid(self.process.pid), 9)
except OSError:
# Process is already gone
pass
self.process = None
@echo.echo
def on_any_event(self, event):
self.stop()
self.start()
| mit |
dogukantufekci/workplace_saas | workplace_saas/_apps/companies/models.py | 2 | 2644 | from django.db import models
from countries.models import Country
from people.models import Person
# Models -----------------------------------------
class Company(models.Model):
SOLE_TRADER = 1
CORPORATION = 2
TYPE_CHOICES = (
(SOLE_TRADER, 'Sole Trader'),
(CORPORATION, 'Corporation'),
)
name = models.CharField(
max_length=100,
)
type = models.PositiveSmallIntegerField(
choices=TYPE_CHOICES,
)
class SoleTrader(models.Model):
person = models.OneToOneField(
Person,
related_name='sole_trader',
)
class Corporation(models.Model):
registered_in = models.ForeignKey(
Country,
related_name='companies',
)
registration_number = models.CharField(
max_length=40,
)
class Meta:
unique_together = (('registered_in', 'registration_number'),)
class CompanyEmail(models.Model):
company = models.ForeignKey(
Company,
related_name='company_emails',
)
email = models.EmailField(
max_length=254,
)
class Meta:
unique_together = (('company', 'email',),)
class CompanyPhoneNumber(models.Model):
company = models.ForeignKey(
Company,
related_name='company_phone_numbers',
)
phone_number = models.ForeignKey(
PhoneNumber,
related_name='company_phone_numbers',
)
# Custom Models -----------------------------------------
class CustomCompany(models.Model):
name = models.CharField(
max_length=100,
)
type = models.PositiveSmallIntegerField(
choices=Company.TYPE_CHOICES,
)
class CustomSoleTrader(models.Model):
person = models.OneToOneField(
CustomPerson,
related_name='custom_sole_trader',
)
class CustomCorporation(models.Model):
registered_in = models.ForeignKey(
Country,
related_name='custom_corporations',
)
registration_number = models.CharField(
max_length=40,
)
class Meta:
unique_together = (('registered_in', 'registration_number'),)
class CustomCompanyEmail(models.Model):
custom_company = models.ForeignKey(
CustomCompany,
related_name='custom_company_emails',
)
email = models.EmailField(
max_length=254,
)
class Meta:
unique_together = (('custom_company', 'email',),)
class CustomCompanyPhoneNumber(models.Model):
company = models.ForeignKey(
Company,
related_name='custom_company_phone_numbers',
)
phone_number = models.ForeignKey(
PhoneNumber,
related_name='custom_company_phone_numbers',
) | mit |
Comikris/Assignment_Interpreter | FileManagement/filehandler.py | 1 | 7873 | from FileManagement.interface_filehandler import *
# Brendan
import pickle
import os
import sys
import math
# kate
import re
from datetime import *
# Kris Little design
class FileHandler(IFileHandler):
def __init__(self):
self.valid = True
# Kris
def load_file(self, file):
# put error handling here
contents = []
try:
the_file = open(file, 'r')
except FileNotFoundError:
print("file does not exist.")
else:
for line in the_file:
line = tuple(line.replace('\n', "").split(','))
contents.append(line)
the_file.close()
return contents
# Kris
def write_file(self, file, data):
the_file = open(file, 'w')
string = ""
for l in data:
new_data = [l[0], l[1], l[2], l[3], l[4], l[5], l[6]]
for i in range(len(new_data)):
string += str(new_data[i])
# prevent a space at the end of a line
if i != len(new_data) - 1:
string += ','
string += "\n"
the_file.write(string)
the_file.close()
# validate input for date type
# KATE
def valid_date(self, birthday):
minyear = 1000
maxyear = date.today().year
mydate = birthday.split('-')
if len(mydate) == 3:
birthdate = mydate[0]
birthmonth = mydate[1]
birthyear = mydate[2]
print(birthyear)
            if minyear <= int(birthyear) <= maxyear:
                print(mydate)
                # date() expects integer year, month and day, in that order
                birthdayobj = date(int(birthyear), int(birthmonth), int(birthdate))
                return True
            else:
                print('Year is out of range')
# Validate date match year
# KATE
def valid_age(self, birthday):
today = date.today()
mydate = birthday
print(mydate)
try:
            born = datetime.strptime(mydate, '%d-%m-%Y')
except ValueError:
pass
else:
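            # Subtract 1 when the birthday has not yet occurred this year: the boolean
            # comparison of (month, day) tuples evaluates to 1 (True) or 0 (False).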
age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))
return age
# Validate file data
def validate(self, data):
""" TestCase for validate
>>> aFileHandler = FileHandler()
>>> aFileHandler.validate([("e01","m","20","20","Normal","200","12-06-1998")])
invalidate data: e01
invalidate data: m
invalidate data: 20
invalidate data: 20
invalidate data: Normal
invalidate data: 200
invalidate data: 12-06-1998
"""
add_to = []
feedback = ""
for person in data:
feedback += "Feedback for data at: " + str(data.index(person) + 1) + "\n"
self.valid = True
print(person)
            # check the format is a letter and 3 digits e.g. A002 or a002
if re.match(r'[a-z][0-9]{3}', (person[0]).lower()):
# Kris
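                # the regex only anchors at the start of the string, so separately
                # reject IDs longer than 4 characters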
if len(str(person[0])) >= 5:
self.valid = False
else:
# Kris
feedback += "ID is incorrect; must contain a letter and 3 digits e.g. a001.\n"
self.valid = False
            # check the gender is either M or F
if person[1].upper() == "M" or (person[1]).upper() == "F":
print(person[1])
else:
# Kris
feedback += "Incorect Gender; must be M or F.\n"
self.valid = False
# CHECK DATE, THEN CHECK AGE..
# Kris
date_correct = True
try:
datetime.strptime(person[6], "%d-%m-%Y")
except ValueError:
date_correct = False
feedback += "Date is not corrent format! " + str(person[6])
self.valid = False
if date_correct:
the_date = datetime.strptime(person[6], "%d-%m-%Y")
test_age = math.floor(((datetime.today() - the_date).days/365))
if test_age == int(person[2]):
pass
else:
self.valid = False
feedback += "Age and birthday does not match. " + str(test_age) + ":" + str(int(person[2]))
            # check sales is a 3 digit integer value
if re.match(r'[0-9]{3}', person[3]):
print(person[3])
else:
feedback += "Incorrect sales number; must be a 3 digit whole number. \n"
self.valid = False
# check BMI is either Normal / Overweight / Obesity or Underweight
if re.match(r'\b(NORMAL|OVERWEIGHT|OBESITY|UNDERWEIGHT)\b', (person[4]).upper()):
print(person[4])
else:
feedback += "Incorrect BMI value; Choose from Normal, Overweight, Obesity or Underweight. \n"
self.valid = False
            # check income is an integer of at most 3 digits
try:
if int(person[5]):
if len(str(int(person[5]))) > 3:
feedback += "Income is too large."
self.valid = False
else:
pass
else:
feedback += "Incorrect income; must be an integer number. \n" + str(person[5])
self.valid = False
except ValueError:
self.valid = False
if self.valid:
add_to.append(person)
feedback += "Passed and added to database.\n"
else:
feedback += '\n\n'
print(feedback)
return add_to
# Brendan Holt
# Used to pickle the loaded graphs to default pickle file
def pack_pickle(self, graphs):
        # Raises an exception if the default file does not exist and creates its directory should this exception be raised
try:
realfilepath = os.path.dirname(os.path.realpath(sys.argv[0])) + "\\files\\pickle.dat"
if not os.path.exists(realfilepath):
raise IOError
except IOError:
os.makedirs(os.path.dirname(realfilepath))
pass
# The pickle process
pickleout = open(realfilepath, "wb")
pickle.dump(graphs, pickleout)
pickleout.close()
# Brendan Holt
# Used to unpickle graphs in the pickle file and return them to the interpreters graph list
def unpack_pickle(self, filepath):
# Raises exception if for some reason the default file has been deleted
try:
if os.path.exists(filepath) is False:
raise IOError
except IOError:
            print('File does not exist')
return
# The unpickle process
picklein = open(filepath, "rb")
graphs = pickle.load(picklein)
picklein.close()
# Return the graphs to the interpreter
return graphs
# Brendan Holt
# Used to pickle the entire database to default pickle file
def pickle_all(self, data):
        # Raises an exception if for some reason the default directory has been deleted
try:
realfiledirectory = os.path.dirname(os.path.realpath(sys.argv[0])) + "\\files\\"
if os.path.exists(realfiledirectory) is False:
raise IOError
except IOError:
os.makedirs(os.path.dirname(realfiledirectory))
return
# The pickle process
pickleout = open(realfiledirectory + "\\db_backup.dat", "wb")
pickle.dump(data, pickleout)
pickleout.close()
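# Illustrative usage sketch (not part of the original module; file names are examples only):
#   handler = FileHandler()
#   rows = handler.load_file('staff.csv')
#   valid_rows = handler.validate(rows)
#   handler.write_file('staff_clean.csv', valid_rows)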
| apache-2.0 |
swfly/StreamCoin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
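# Illustrative sketch (not part of the original script): given xgettext output such as
#
#   msgid "Usage:"
#   msgstr ""
#
# parse_po() returns [(['"Usage:"'], ['""'])] -- each message is kept as its raw
# quoted source lines so they can be re-emitted verbatim below.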
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
chouseknecht/openshift-restclient-python | openshift/test/test_v1_security_context_constraints.py | 1 | 4347 | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_security_context_constraints import V1SecurityContextConstraints
class TestV1SecurityContextConstraints(unittest.TestCase):
""" V1SecurityContextConstraints unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SecurityContextConstraints(self):
"""
Test V1SecurityContextConstraints
"""
# FIXME: construct object with mandatory attributes with example values
#model = openshift.client.models.v1_security_context_constraints.V1SecurityContextConstraints()
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
rabipanda/tensorflow | tensorflow/contrib/py2tf/pyct/templates_test.py | 2 | 2067 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import compiler
from tensorflow.contrib.py2tf.pyct import templates
from tensorflow.python.platform import test
class TemplatesTest(test.TestCase):
def test_replace_variable(self):
template = """
def test_fn(a):
a += 1
a = 2 * a + 1
return b
"""
node = templates.replace(template, a='b')[0]
result = compiler.ast_to_object(node)
self.assertEquals(7, result.test_fn(2))
def test_replace_function_name(self):
template = """
def fname(a):
a += 1
a = 2 * a + 1
return a
"""
node = templates.replace(template, fname='test_fn')[0]
result = compiler.ast_to_object(node)
self.assertEquals(7, result.test_fn(2))
def test_code_block(self):
template = """
def test_fn(a):
block
return a
"""
node = templates.replace(
template,
block=[
gast.Assign([
gast.Name('a', None, None)
], gast.BinOp(gast.Name('a', None, None), gast.Add(), gast.Num(1))),
] * 2)[0]
result = compiler.ast_to_object(node)
self.assertEquals(3, result.test_fn(1))
if __name__ == '__main__':
test.main()
| apache-2.0 |
afrantzis/pixel-format-guide | tests/pfgtest.py | 1 | 3257 | # Copyright © 2017 Collabora Ltd.
#
# This file is part of pfg.
#
# pfg is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# pfg is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pfg. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alexandros Frantzis <[email protected]>
import unittest
import pfg
from pfg import util
class TestCase(unittest.TestCase):
def assertFormatMatches(self, format_str, data_type, native, memory_le, memory_be):
fd = pfg.describe(format_str)
self.assertEqual(data_type, fd.data_type)
self.assertEqual(native, fd.native)
self.assertEqual(memory_le, fd.memory_le)
self.assertEqual(memory_be, fd.memory_be)
def assertFormatMatchesUnorm(self, format_str, native, memory_le, memory_be):
self.assertFormatMatches(format_str, "UNORM", native, memory_le, memory_be)
def assertFormatIsUnknown(self, format_str):
self.assertIsNone(pfg.describe(format_str))
def assertFindCompatibleMatches(self, format_str, family_str,
everywhere, little_endian, big_endian,
treat_x_as_a=False,
treat_srgb_as_unorm=False,
ignore_data_types=False):
compatibility = pfg.find_compatible(
format_str, family_str,
treat_x_as_a=treat_x_as_a,
treat_srgb_as_unorm=treat_srgb_as_unorm,
ignore_data_types=ignore_data_types)
# assertCountEqual checks for the existence of items regardless
# of order (and has a misleading name...)
self.assertCountEqual(everywhere, compatibility.everywhere)
self.assertCountEqual(little_endian, compatibility.little_endian)
self.assertCountEqual(big_endian, compatibility.big_endian)
def assertHasDocumentationFor(self, family):
documentation = pfg.document(family)
self.assertEqual(util.read_documentation(family + ".md"), documentation)
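# Shorthand helpers for building expected per-component bit descriptions via
# util.component_bits; e.g. R(7, 0) appears to describe the red component occupying
# bits 7 down to 0 (m and l presumably being the most and least significant bits).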
def R(m,l): return util.component_bits("R", m, l)
def G(m,l): return util.component_bits("G", m, l)
def B(m,l): return util.component_bits("B", m, l)
def A(m,l): return util.component_bits("A", m, l)
def X(m,l): return util.component_bits("X", m, l)
def Y(m,l): return util.component_bits("Y", m, l)
def U(m,l): return util.component_bits("U", m, l)
def V(m,l): return util.component_bits("V", m, l)
def C(m,l): return util.component_bits("C", m, l)
def Rn(n,m,l): return util.component_bits("(R+%d)" % n, m, l)
def Gn(n,m,l): return util.component_bits("(G+%d)" % n, m, l)
def Bn(n,m,l): return util.component_bits("(B+%d)" % n, m, l)
def An(n,m,l): return util.component_bits("(A+%d)" % n, m, l)
def Cn(n,m,l): return util.component_bits("(C+%d)" % n, m, l)
| lgpl-2.1 |
ypwalter/fxos-certsuite | mcts/webapi_tests/telephony/test_telephony_outgoing.py | 6 | 2891 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from mcts.webapi_tests.semiauto import TestCase
from mcts.webapi_tests.telephony import TelephonyTestCommon
class TestTelephonyOutgoing(TestCase, TelephonyTestCommon):
"""
This is a test for the `WebTelephony API`_ which will:
- Disable the default gaia dialer, so that the test app can handle calls
- Ask the test user to specify a destination phone number for the test call
- Setup mozTelephonyCall event listeners for the outgoing call
- Use the API to initiate the outgoing call
- Ask the test user to answer the call on the destination phone
- Keep the call active for 5 seconds, then hang up the call via the API
- Verify that the corresponding mozTelephonyCall events were triggered
- Re-enable the default gaia dialer
.. _`WebTelephony API`: https://developer.mozilla.org/en-US/docs/Web/Guide/API/Telephony
"""
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
TelephonyTestCommon.__init__(self)
def setUp(self):
self.addCleanup(self.clean_up)
super(TestTelephonyOutgoing, self).setUp()
self.wait_for_obj("window.navigator.mozTelephony")
# disable the default dialer manager so it doesn't grab our calls
self.disable_dialer()
def test_telephony_outgoing(self):
# use the webapi to make an outgoing call to user-specified number
self.user_guided_outgoing_call()
# verify one outgoing call
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 1, "There should be 1 call")
self.assertEqual(self.calls['0'], self.outgoing_call)
# have user answer the call on target
self.answer_call(incoming=False)
# keep call active for a while
time.sleep(5)
# verify the active call
self.assertEqual(self.active_call_list[0]['number'], self.outgoing_call['number'])
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 1, "There should be 1 active call")
self.assertEqual(self.active_call_list[0]['state'], "connected", "Call state should be 'connected'")
# disconnect the active call
self.hangup_call()
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 0, "There should be 0 calls")
def clean_up(self):
# re-enable the default dialer manager
self.enable_dialer()
self.active_call_list = []
| mpl-2.0 |
Batterfii/django | django/contrib/gis/maps/google/overlays.py | 151 | 11955 | from __future__ import unicode_literals
from functools import total_ordering
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
"""
A Python wrapper for the Google GEvent object.
Events can be attached to any object derived from GOverlayBase with the
add_event() call.
For more information please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#event
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline
def sample_request(request):
polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
polyline.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(polylines=[polyline])})
"""
def __init__(self, event, action):
"""
Initializes a GEvent object.
Parameters:
event:
string for the event, such as 'click'. The event must be a valid
event for the object in the Google Maps API.
There is no validation of the event type within Django.
action:
string containing a Javascript function, such as
'function() { location.href = "newurl";}'
The string must be a valid Javascript function. Again there is no
            validation of the function within Django.
"""
self.event = event
self.action = action
def __str__(self):
"Returns the parameter part of a GEvent."
return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
def __init__(self):
self.events = []
def latlng_from_coords(self, coords):
"Generates a JavaScript array of GLatLng objects for the given coordinates."
return '[%s]' % ','.join('new GLatLng(%s,%s)' % (y, x) for x, y in coords)
def add_event(self, event):
"Attaches a GEvent to the overlay object."
self.events.append(event)
def __str__(self):
"The string representation is the JavaScript API call."
return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
"""
A Python wrapper for the Google GPolygon object. For more information
please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#Polygon
"""
def __init__(self, poly,
stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
fill_color='#0000ff', fill_opacity=0.4):
"""
The GPolygon object initializes on a GEOS Polygon or a parameter that
may be instantiated into GEOS Polygon. Please note that this will not
depict a Polygon's internal rings.
Keyword Options:
stroke_color:
The color of the polygon outline. Defaults to '#0000ff' (blue).
stroke_weight:
The width of the polygon outline, in pixels. Defaults to 2.
stroke_opacity:
The opacity of the polygon outline, between 0 and 1. Defaults to 1.
fill_color:
The color of the polygon fill. Defaults to '#0000ff' (blue).
fill_opacity:
The opacity of the polygon fill. Defaults to 0.4.
"""
if isinstance(poly, six.string_types):
poly = fromstr(poly)
if isinstance(poly, (tuple, list)):
poly = Polygon(poly)
if not isinstance(poly, Polygon):
raise TypeError('GPolygon may only initialize on GEOS Polygons.')
# Getting the envelope of the input polygon (used for automatically
# determining the zoom level).
self.envelope = poly.envelope
# Translating the coordinates into a JavaScript array of
# Google `GLatLng` objects.
self.points = self.latlng_from_coords(poly.shell.coords)
# Stroke settings.
self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
# Fill settings.
self.fill_color, self.fill_opacity = fill_color, fill_opacity
super(GPolygon, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
self.fill_color, self.fill_opacity)
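# Example (illustrative only): a GPolygon can be constructed directly from WKT, e.g.
#   GPolygon('POLYGON((0 0, 0 50, 50 50, 50 0, 0 0))', fill_color='#ff0000')
# which renders a red-filled square using the default outline settings.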
class GPolyline(GOverlayBase):
"""
A Python wrapper for the Google GPolyline object. For more information
please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#Polyline
"""
def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
"""
        The GPolyline object may be initialized on GEOS LineString, LinearRing,
        and Polygon objects (internal rings not supported) or a parameter that
        may be instantiated into one of the above geometries.
Keyword Options:
color:
The color to use for the polyline. Defaults to '#0000ff' (blue).
weight:
The width of the polyline, in pixels. Defaults to 2.
opacity:
The opacity of the polyline, between 0 and 1. Defaults to 1.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Polygon(geom)
# Generating the lat/lng coordinate pairs.
if isinstance(geom, (LineString, LinearRing)):
self.latlngs = self.latlng_from_coords(geom.coords)
elif isinstance(geom, Polygon):
self.latlngs = self.latlng_from_coords(geom.shell.coords)
else:
raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
self.color, self.weight, self.opacity = color, weight, opacity
super(GPolyline, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
"""
Creates a GIcon object to pass into a Gmarker object.
The keyword arguments map to instance attributes of the same name. These,
in turn, correspond to a subset of the attributes of the official GIcon
javascript object:
https://developers.google.com/maps/documentation/javascript/reference#Icon
Because a Google map often uses several different icons, a name field has
been added to the required arguments.
Required Arguments:
varname:
A string which will become the basis for the js variable name of
the marker, for this reason, your code should assign a unique
name for each GIcon you instantiate, otherwise there will be
name space collisions in your javascript.
Keyword Options:
image:
The url of the image to be used as the icon on the map defaults
to 'G_DEFAULT_ICON'
iconsize:
a tuple representing the pixel size of the foreground (not the
shadow) image of the icon, in the format: (width, height) ex.:
GIcon('fast_food',
image="/media/icon/star.png",
iconsize=(15,10))
Would indicate your custom icon was 15px wide and 10px height.
shadow:
the url of the image of the icon's shadow
shadowsize:
a tuple representing the pixel size of the shadow image, format is
the same as ``iconsize``
iconanchor:
a tuple representing the pixel coordinate relative to the top left
corner of the icon image at which this icon is anchored to the map.
In (x, y) format. x increases to the right in the Google Maps
coordinate system and y increases downwards in the Google Maps
coordinate system.)
infowindowanchor:
The pixel coordinate relative to the top left corner of the icon
image at which the info window is anchored to this icon.
"""
def __init__(self, varname, image=None, iconsize=None,
shadow=None, shadowsize=None, iconanchor=None,
infowindowanchor=None):
self.varname = varname
self.image = image
self.iconsize = iconsize
self.shadow = shadow
self.shadowsize = shadowsize
self.iconanchor = iconanchor
self.infowindowanchor = infowindowanchor
def __eq__(self, other):
return self.varname == other.varname
def __lt__(self, other):
return self.varname < other.varname
def __hash__(self):
# XOR with hash of GIcon type so that hash('varname') won't
# equal hash(GIcon('varname')).
return hash(self.__class__) ^ hash(self.varname)
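# Example (illustrative only): a custom icon is typically created once and then
# passed to one or more markers via the `icon` keyword, e.g.
#   star = GIcon('star', image='/media/icon/star.png', iconsize=(15, 10))
#   GMarker('POINT(101 26)', title='Fast food', icon=star)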
class GMarker(GOverlayBase):
"""
A Python wrapper for the Google GMarker object. For more information
please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#Marker
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google.overlays import GMarker, GEvent
def sample_request(request):
marker = GMarker('POINT(101 26)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
marker.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(markers=[marker])})
"""
def __init__(self, geom, title=None, draggable=False, icon=None):
"""
The GMarker object may initialize on GEOS Points or a parameter
that may be instantiated into a GEOS point. Keyword options map to
GMarkerOptions -- so far only the title option is supported.
Keyword Options:
title:
Title option for GMarker, will be displayed as a tooltip.
draggable:
Draggable option for GMarker, disabled by default.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Point(geom)
if isinstance(geom, Point):
self.latlng = self.latlng_from_coords(geom.coords)
else:
raise TypeError('GMarker may only initialize on GEOS Point geometry.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
# TODO: Add support for more GMarkerOptions
self.title = title
self.draggable = draggable
self.icon = icon
super(GMarker, self).__init__()
def latlng_from_coords(self, coords):
return 'new GLatLng(%s,%s)' % (coords[1], coords[0])
def options(self):
result = []
if self.title:
result.append('title: "%s"' % self.title)
if self.icon:
result.append('icon: %s' % self.icon.varname)
if self.draggable:
result.append('draggable: true')
return '{%s}' % ','.join(result)
@property
def js_params(self):
return '%s, %s' % (self.latlng, self.options())
| bsd-3-clause |
EricCline/CEM_inc | env/lib/python2.7/site-packages/south/tests/logic.py | 127 | 33513 | from south.tests import unittest
import datetime
import sys
try:
set # builtin, python >=2.6
except NameError:
from sets import Set as set # in stdlib, python >=2.3
from south import exceptions
from south.migration import migrate_app
from south.migration.base import all_migrations, Migrations
from south.creator.changes import ManualChanges
from south.migration.utils import depends, flatten, get_app_label
from south.models import MigrationHistory
from south.tests import Monkeypatcher
from south.db import db
class TestBrokenMigration(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp", "brokenapp"]
def test_broken_dependencies(self):
self.assertRaises(
exceptions.DependsOnUnmigratedApplication,
Migrations.calculate_dependencies,
force=True,
)
#depends_on_unknown = self.brokenapp['0002_depends_on_unknown']
#self.assertRaises(exceptions.DependsOnUnknownMigration,
# depends_on_unknown.dependencies)
#depends_on_higher = self.brokenapp['0003_depends_on_higher']
#self.assertRaises(exceptions.DependsOnHigherMigration,
# depends_on_higher.dependencies)
class TestMigration(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def setUp(self):
super(TestMigration, self).setUp()
self.fakeapp = Migrations('fakeapp')
self.otherfakeapp = Migrations('otherfakeapp')
Migrations.calculate_dependencies(force=True)
def test_str(self):
migrations = [str(m) for m in self.fakeapp]
self.assertEqual(['fakeapp:0001_spam',
'fakeapp:0002_eggs',
'fakeapp:0003_alter_spam'],
migrations)
def test_repr(self):
migrations = [repr(m) for m in self.fakeapp]
self.assertEqual(['<Migration: fakeapp:0001_spam>',
'<Migration: fakeapp:0002_eggs>',
'<Migration: fakeapp:0003_alter_spam>'],
migrations)
def test_app_label(self):
self.assertEqual(['fakeapp', 'fakeapp', 'fakeapp'],
[m.app_label() for m in self.fakeapp])
def test_name(self):
self.assertEqual(['0001_spam', '0002_eggs', '0003_alter_spam'],
[m.name() for m in self.fakeapp])
def test_full_name(self):
self.assertEqual(['fakeapp.migrations.0001_spam',
'fakeapp.migrations.0002_eggs',
'fakeapp.migrations.0003_alter_spam'],
[m.full_name() for m in self.fakeapp])
def test_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration
self.assertEqual([M1, M2, M3],
[m.migration().Migration for m in self.fakeapp])
self.assertRaises(exceptions.UnknownMigration,
self.fakeapp['9999_unknown'].migration)
def test_previous(self):
self.assertEqual([None,
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs']],
[m.previous() for m in self.fakeapp])
def test_dependencies(self):
"Test that the dependency detection works."
self.assertEqual([
set([]),
set([self.fakeapp['0001_spam']]),
set([self.fakeapp['0002_eggs']])
],
[m.dependencies for m in self.fakeapp],
)
self.assertEqual([
set([self.fakeapp['0001_spam']]),
set([self.otherfakeapp['0001_first']]),
set([
self.otherfakeapp['0002_second'],
self.fakeapp['0003_alter_spam'],
])
],
[m.dependencies for m in self.otherfakeapp],
)
def test_forwards_plan(self):
self.assertEqual([
[self.fakeapp['0001_spam']],
[
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs']
],
[
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs'],
self.fakeapp['0003_alter_spam'],
]
],
[m.forwards_plan() for m in self.fakeapp],
)
self.assertEqual([
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first']
],
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first'],
self.otherfakeapp['0002_second']
],
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first'],
self.otherfakeapp['0002_second'],
self.fakeapp['0002_eggs'],
self.fakeapp['0003_alter_spam'],
self.otherfakeapp['0003_third'],
]
],
[m.forwards_plan() for m in self.otherfakeapp],
)
def test_is_before(self):
F1 = self.fakeapp['0001_spam']
F2 = self.fakeapp['0002_eggs']
F3 = self.fakeapp['0003_alter_spam']
O1 = self.otherfakeapp['0001_first']
O2 = self.otherfakeapp['0002_second']
O3 = self.otherfakeapp['0003_third']
self.assertTrue(F1.is_before(F2))
self.assertTrue(F1.is_before(F3))
self.assertTrue(F2.is_before(F3))
self.assertEqual(O3.is_before(O1), False)
self.assertEqual(O3.is_before(O2), False)
self.assertEqual(O2.is_before(O2), False)
self.assertEqual(O2.is_before(O1), False)
self.assertEqual(F2.is_before(O1), None)
self.assertEqual(F2.is_before(O2), None)
self.assertEqual(F2.is_before(O3), None)
class TestMigrationDependencies(Monkeypatcher):
installed_apps = ['deps_a', 'deps_b', 'deps_c']
def setUp(self):
super(TestMigrationDependencies, self).setUp()
self.deps_a = Migrations('deps_a')
self.deps_b = Migrations('deps_b')
self.deps_c = Migrations('deps_c')
Migrations.calculate_dependencies(force=True)
def test_dependencies(self):
self.assertEqual(
[
set([]),
set([self.deps_a['0001_a']]),
set([self.deps_a['0002_a']]),
set([
self.deps_a['0003_a'],
self.deps_b['0003_b'],
]),
set([self.deps_a['0004_a']]),
],
[m.dependencies for m in self.deps_a],
)
self.assertEqual(
[
set([]),
set([
self.deps_b['0001_b'],
self.deps_a['0002_a']
]),
set([
self.deps_b['0002_b'],
self.deps_a['0003_a']
]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b']]),
],
[m.dependencies for m in self.deps_b],
)
self.assertEqual(
[
set([]),
set([self.deps_c['0001_c']]),
set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([
self.deps_c['0004_c'],
self.deps_a['0002_a']
]),
],
[m.dependencies for m in self.deps_c],
)
def test_dependents(self):
self.assertEqual([set([self.deps_a['0002_a']]),
set([self.deps_c['0005_c'],
self.deps_b['0002_b'],
self.deps_a['0003_a']]),
set([self.deps_b['0003_b'],
self.deps_a['0004_a']]),
set([self.deps_a['0005_a']]),
set([])],
[m.dependents for m in self.deps_a])
self.assertEqual([set([self.deps_b['0002_b']]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b'],
self.deps_a['0004_a']]),
set([self.deps_b['0005_b']]),
set([])],
[m.dependents for m in self.deps_b])
self.assertEqual([set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([self.deps_c['0004_c']]),
set([self.deps_c['0005_c']]),
set([])],
[m.dependents for m in self.deps_c])
def test_forwards_plan(self):
self.assertEqual([[self.deps_a['0001_a']],
[self.deps_a['0001_a'],
self.deps_a['0002_a']],
[self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_a['0003_a']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_a['0004_a']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_a['0004_a'],
self.deps_a['0005_a']]],
[m.forwards_plan() for m in self.deps_a])
self.assertEqual([[self.deps_b['0001_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_b['0004_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_b['0004_b'],
self.deps_b['0005_b']]],
[m.forwards_plan() for m in self.deps_b])
self.assertEqual([[self.deps_c['0001_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c'],
self.deps_c['0004_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c'],
self.deps_c['0004_c'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_c['0005_c']]],
[m.forwards_plan() for m in self.deps_c])
def test_backwards_plan(self):
self.assertEqual([
[
self.deps_c['0005_c'],
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_a['0002_a'],
self.deps_a['0001_a'],
],
[
self.deps_c['0005_c'],
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_a['0002_a'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_a['0003_a'],
],
[
self.deps_a['0005_a'],
self.deps_a['0004_a'],
],
[
self.deps_a['0005_a'],
]
], [m.backwards_plan() for m in self.deps_a])
self.assertEqual([
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
self.deps_b['0001_b'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
],
[
self.deps_b['0005_b'],
],
], [m.backwards_plan() for m in self.deps_b])
self.assertEqual([
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
self.deps_c['0003_c'],
self.deps_c['0002_c'],
self.deps_c['0001_c'],
],
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
self.deps_c['0003_c'],
self.deps_c['0002_c'],
],
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
self.deps_c['0003_c'],
],
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
],
[self.deps_c['0005_c']]
], [m.backwards_plan() for m in self.deps_c])
class TestCircularDependencies(Monkeypatcher):
installed_apps = ["circular_a", "circular_b"]
def test_plans(self):
Migrations.calculate_dependencies(force=True)
circular_a = Migrations('circular_a')
circular_b = Migrations('circular_b')
self.assertRaises(
exceptions.CircularDependency,
circular_a[-1].forwards_plan,
)
self.assertRaises(
exceptions.CircularDependency,
circular_b[-1].forwards_plan,
)
self.assertRaises(
exceptions.CircularDependency,
circular_a[-1].backwards_plan,
)
self.assertRaises(
exceptions.CircularDependency,
circular_b[-1].backwards_plan,
)
class TestMigrations(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def test_all(self):
M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
M2 = Migrations(__import__("otherfakeapp", {}, {}, ['']))
self.assertEqual(
[M1, M2],
list(all_migrations()),
)
def test(self):
M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
self.assertEqual(M1, Migrations("fakeapp"))
self.assertEqual(M1, Migrations(self.create_fake_app("fakeapp")))
def test_application(self):
fakeapp = Migrations("fakeapp")
application = __import__("fakeapp", {}, {}, [''])
self.assertEqual(application, fakeapp.application)
def test_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
migration = Migrations('fakeapp')
self.assertEqual(M1, migration['0001_spam'].migration().Migration)
self.assertEqual(M2, migration['0002_eggs'].migration().Migration)
self.assertRaises(exceptions.UnknownMigration,
migration['0001_jam'].migration)
def test_guess_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
migration = Migrations('fakeapp')
self.assertEqual(M1, migration.guess_migration("0001_spam").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_spa").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_sp").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_s").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001").migration().Migration)
self.assertRaises(exceptions.UnknownMigration,
migration.guess_migration, "0001-spam")
self.assertRaises(exceptions.MultiplePrefixMatches,
migration.guess_migration, "000")
self.assertRaises(exceptions.MultiplePrefixMatches,
migration.guess_migration, "")
self.assertRaises(exceptions.UnknownMigration,
migration.guess_migration, "0001_spams")
self.assertRaises(exceptions.UnknownMigration,
migration.guess_migration, "0001_jam")
def test_app_label(self):
names = ['fakeapp', 'otherfakeapp']
self.assertEqual(names,
[Migrations(n).app_label() for n in names])
def test_full_name(self):
names = ['fakeapp', 'otherfakeapp']
self.assertEqual([n + '.migrations' for n in names],
[Migrations(n).full_name() for n in names])
class TestMigrationLogic(Monkeypatcher):
"""
Tests if the various logic functions in migration actually work.
"""
installed_apps = ["fakeapp", "otherfakeapp"]
def setUp(self):
super(TestMigrationLogic, self).setUp()
MigrationHistory.objects.all().delete()
def assertListEqual(self, list1, list2, msg=None):
list1 = set(list1)
list2 = set(list2)
return self.assert_(list1 == list2, "%s is not equal to %s" % (list1, list2))
def test_find_ghost_migrations(self):
pass
def test_apply_migrations(self):
migrations = Migrations("fakeapp")
# We should start with no migrations
self.assertEqual(list(MigrationHistory.objects.all()), [])
# Apply them normally
migrate_app(migrations, target_name=None, fake=False,
load_initial_data=True)
# We should finish with all migrations
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),
("fakeapp", "0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Now roll them backwards
migrate_app(migrations, target_name="zero", fake=False)
# Finish with none
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_migration_merge_forwards(self):
migrations = Migrations("fakeapp")
# We should start with no migrations
self.assertEqual(list(MigrationHistory.objects.all()), [])
# Insert one in the wrong order
MigrationHistory.objects.create(app_name = "fakeapp",
migration = "0002_eggs",
applied = datetime.datetime.now())
# Did it go in?
self.assertListEqual(
(("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Apply them normally
self.assertRaises(exceptions.InconsistentMigrationHistory,
migrate_app,
migrations, target_name=None, fake=False)
self.assertRaises(exceptions.InconsistentMigrationHistory,
migrate_app,
migrations, target_name='zero', fake=False)
try:
migrate_app(migrations, target_name=None, fake=False)
except exceptions.InconsistentMigrationHistory as e:
self.assertEqual(
[
(
migrations['0002_eggs'],
migrations['0001_spam'],
)
],
e.problems,
)
try:
migrate_app(migrations, target_name="zero", fake=False)
except exceptions.InconsistentMigrationHistory as e:
self.assertEqual(
[
(
migrations['0002_eggs'],
migrations['0001_spam'],
)
],
e.problems,
)
# Nothing should have changed (no merge mode!)
self.assertListEqual(
(("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Apply with merge
migrate_app(migrations, target_name=None, merge=True, fake=False)
# We should finish with all migrations
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),
("fakeapp", "0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Now roll them backwards
migrate_app(migrations, target_name="0002", fake=False)
migrate_app(migrations, target_name="0001", fake=True)
migrate_app(migrations, target_name="zero", fake=False)
# Finish with none
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_alter_column_null(self):
def null_ok(eat_exception=True):
from django.db import connection, transaction
# the DBAPI introspection module fails on postgres NULLs.
cursor = connection.cursor()
# SQLite has weird now()
if db.backend_name == "sqlite3":
now_func = "DATETIME('NOW')"
# So does SQLServer... should we be using a backend attribute?
elif db.backend_name == "pyodbc":
now_func = "GETDATE()"
elif db.backend_name == "oracle":
now_func = "SYSDATE"
else:
now_func = "NOW()"
try:
if db.backend_name == "pyodbc":
cursor.execute("SET IDENTITY_INSERT southtest_spam ON;")
cursor.execute("INSERT INTO southtest_spam (id, weight, expires, name) VALUES (100, NULL, %s, 'whatever');" % now_func)
except:
if eat_exception:
transaction.rollback()
return False
else:
raise
else:
cursor.execute("DELETE FROM southtest_spam")
transaction.commit()
return True
MigrationHistory.objects.all().delete()
migrations = Migrations("fakeapp")
# by default name is NOT NULL
migrate_app(migrations, target_name="0002", fake=False)
self.failIf(null_ok())
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# after 0003, it should be NULL
migrate_app(migrations, target_name="0003", fake=False)
self.assert_(null_ok(False))
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),
("fakeapp", "0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# make sure it is NOT NULL again
migrate_app(migrations, target_name="0002", fake=False)
self.failIf(null_ok(), 'weight not null after migration')
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# finish with no migrations, otherwise other tests fail...
migrate_app(migrations, target_name="zero", fake=False)
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_dependencies(self):
fakeapp = Migrations("fakeapp")
otherfakeapp = Migrations("otherfakeapp")
# Test a simple path
self.assertEqual([fakeapp['0001_spam'],
fakeapp['0002_eggs'],
fakeapp['0003_alter_spam']],
fakeapp['0003_alter_spam'].forwards_plan())
# And a complex one.
self.assertEqual(
[
fakeapp['0001_spam'],
otherfakeapp['0001_first'],
otherfakeapp['0002_second'],
fakeapp['0002_eggs'],
fakeapp['0003_alter_spam'],
otherfakeapp['0003_third']
],
otherfakeapp['0003_third'].forwards_plan(),
)
class TestMigrationUtils(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def test_get_app_label(self):
self.assertEqual(
"southtest",
get_app_label(self.create_fake_app("southtest.models")),
)
self.assertEqual(
"baz",
get_app_label(self.create_fake_app("foo.bar.baz.models")),
)
class TestUtils(unittest.TestCase):
def test_flatten(self):
self.assertEqual([], list(flatten(iter([]))))
self.assertEqual([], list(flatten(iter([iter([]), ]))))
self.assertEqual([1], list(flatten(iter([1]))))
self.assertEqual([1, 2], list(flatten(iter([1, 2]))))
self.assertEqual([1, 2], list(flatten(iter([iter([1]), 2]))))
self.assertEqual([1, 2], list(flatten(iter([iter([1, 2])]))))
self.assertEqual([1, 2, 3], list(flatten(iter([iter([1, 2]), 3]))))
self.assertEqual([1, 2, 3],
list(flatten(iter([iter([1]), iter([2]), 3]))))
self.assertEqual([1, 2, 3],
list(flatten([[1], [2], 3])))
def test_depends(self):
graph = {'A1': []}
self.assertEqual(['A1'],
depends('A1', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2']}
self.assertEqual(['A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1']}
self.assertEqual(['A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1', 'B1'],
'B1': []}
self.assertEqual(
['B1', 'A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1', 'B2'],
'B1': [],
'B2': ['B1']}
self.assertEqual(
['B1', 'B2', 'A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
graph = {'A1': [],
'A2': ['A1', 'B1'],
'A3': ['A2'],
'B1': ['A1']}
self.assertEqual(['A1', 'B1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1', 'B2'],
'B1': [],
'B2': ['B1', 'C1'],
'C1': ['B1']}
self.assertEqual(
['B1', 'C1', 'B2', 'A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'B2', 'A1', 'C1'],
'B1': ['A1'],
'B2': ['B1', 'C2', 'A1'],
'C1': ['B1'],
'C2': ['C1', 'A1'],
'C3': ['C2']}
self.assertEqual(
['A1', 'B1', 'C1', 'C2', 'B2', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
def assertCircularDependency(self, trace, target, graph):
"Custom assertion that checks a circular dependency is detected correctly."
self.assertRaises(
exceptions.CircularDependency,
depends,
target,
lambda n: graph[n],
)
try:
depends(target, lambda n: graph[n])
except exceptions.CircularDependency as e:
self.assertEqual(trace, e.trace)
def test_depends_cycle(self):
graph = {'A1': ['A1']}
self.assertCircularDependency(
['A1', 'A1'],
'A1',
graph,
)
graph = {'A1': [],
'A2': ['A1', 'A2'],
'A3': ['A2']}
self.assertCircularDependency(
['A2', 'A2'],
'A3',
graph,
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A3'],
'A4': ['A3']}
self.assertCircularDependency(
['A3', 'A3'],
'A4',
graph,
)
graph = {'A1': ['B1'],
'B1': ['A1']}
self.assertCircularDependency(
['A1', 'B1', 'A1'],
'A1',
graph,
)
graph = {'A1': [],
'A2': ['A1', 'B2'],
'A3': ['A2'],
'B1': [],
'B2': ['B1', 'A2'],
'B3': ['B2']}
self.assertCircularDependency(
['A2', 'B2', 'A2'],
'A3',
graph,
)
graph = {'A1': [],
'A2': ['A1', 'B3'],
'A3': ['A2'],
'B1': [],
'B2': ['B1', 'A2'],
'B3': ['B2']}
self.assertCircularDependency(
['A2', 'B3', 'B2', 'A2'],
'A3',
graph,
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'B2'],
'A4': ['A3'],
'B1': ['A3'],
'B2': ['B1']}
self.assertCircularDependency(
['A3', 'B2', 'B1', 'A3'],
'A4',
graph,
)
class TestManualChanges(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def test_suggest_name(self):
migrations = Migrations('fakeapp')
change = ManualChanges(migrations,
[],
['fakeapp.slug'],
[])
self.assertEquals(change.suggest_name(),
'add_field_fakeapp_slug')
change = ManualChanges(migrations,
[],
[],
['fakeapp.slug'])
self.assertEquals(change.suggest_name(),
'add_index_fakeapp_slug')
| mit |
GioneeDevTeam/android_kernel_gionee_msm8974 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
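		# Build the runqueue snapshot after a context switch on this CPU: drop prev
		# if it went to sleep, add it if it is runnable but was not tracked yet, and
		# make sure next is on the queue. An unchanged snapshot returns self so the
		# caller can detect that no event occurred.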
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
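		# Binary search over the time-ordered slices; returns the index of a slice
		# whose [start, end] interval contains ts, or -1 if none does.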
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
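# SchedEventProxy converts raw sched trace events into TimeSlice updates; current_tsk remembers which
# task is on each CPU so inconsistent (missed) sched_switch events can be reported.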
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
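# trace_begin and trace_end are the hooks called by the perf scripting engine before the first and
# after the last event; trace_end builds the wx GUI from the collected time slices.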
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
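# The sched__* handlers below are called by perf once per event; event types not needed for the migration view simply pass.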
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
Coder-Yu/RecQ | algorithm/ranking/BPR.py | 2 | 5528 | #coding:utf8
from baseclass.IterativeRecommender import IterativeRecommender
from random import choice
from tool.qmath import sigmoid
from math import log
from collections import defaultdict
#import tensorflow as tf
class BPR(IterativeRecommender):
# BPR:Bayesian Personalized Ranking from Implicit Feedback
# Steffen Rendle,Christoph Freudenthaler,Zeno Gantner and Lars Schmidt-Thieme
def __init__(self,conf,trainingSet=None,testSet=None,fold='[1]'):
super(BPR, self).__init__(conf,trainingSet,testSet,fold)
# def readConfiguration(self):
# super(BPR, self).readConfiguration()
def initModel(self):
super(BPR, self).initModel()
def buildModel(self):
print 'Preparing item sets...'
self.PositiveSet = defaultdict(dict)
#self.NegativeSet = defaultdict(list)
for user in self.data.user:
for item in self.data.trainSet_u[user]:
if self.data.trainSet_u[user][item] >= 1:
self.PositiveSet[user][item] = 1
# else:
# self.NegativeSet[user].append(item)
print 'training...'
iteration = 0
itemList = self.data.item.keys()
while iteration < self.maxIter:
self.loss = 0
for user in self.PositiveSet:
u = self.data.user[user]
for item in self.PositiveSet[user]:
i = self.data.item[item]
item_j = choice(itemList)
while (self.PositiveSet[user].has_key(item_j)):
item_j = choice(itemList)
j = self.data.item[item_j]
self.optimization(u,i,j)
self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum()
iteration += 1
if self.isConverged(iteration):
break
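# One SGD step on the BPR pairwise loss: raise the score of positive item i above sampled negative item j for user u, then apply L2 regularization.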
def optimization(self,u,i,j):
s = sigmoid(self.P[u].dot(self.Q[i]) - self.P[u].dot(self.Q[j]))
self.P[u] += self.lRate * (1 - s) * (self.Q[i] - self.Q[j])
self.Q[i] += self.lRate * (1 - s) * self.P[u]
self.Q[j] -= self.lRate * (1 - s) * self.P[u]
self.P[u] -= self.lRate * self.regU * self.P[u]
self.Q[i] -= self.lRate * self.regI * self.Q[i]
self.Q[j] -= self.lRate * self.regI * self.Q[j]
self.loss += -log(s)
def predict(self,user,item):
if self.data.containsUser(user) and self.data.containsItem(item):
u = self.data.getUserId(user)
i = self.data.getItemId(item)
predictRating = sigmoid(self.Q[i].dot(self.P[u]))
return predictRating
else:
return sigmoid(self.data.globalMean)
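# Yield (user, positive item, sampled negative item) index batches for the TensorFlow training loop in buildModel_tf.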
def next_batch(self):
batch_id=0
while batch_id<self.train_size:
if batch_id+self.batch_size<=self.train_size:
users = [self.data.trainingData[idx][0] for idx in range(batch_id,self.batch_size+batch_id)]
items = [self.data.trainingData[idx][1] for idx in range(batch_id,self.batch_size+batch_id)]
batch_id+=self.batch_size
else:
users = [self.data.trainingData[idx][0] for idx in range(batch_id, self.train_size)]
items = [self.data.trainingData[idx][1] for idx in range(batch_id, self.train_size)]
batch_id=self.train_size
u_idx,i_idx,j_idx = [],[],[]
item_list = self.data.item.keys()
for i,user in enumerate(users):
i_idx.append(self.data.item[items[i]])
u_idx.append(self.data.user[user])
neg_item = choice(item_list)
while neg_item in self.data.trainSet_u[user]:
neg_item = choice(item_list)
j_idx.append(self.data.item[neg_item])
yield u_idx,i_idx,j_idx
def buildModel_tf(self):
super(BPR, self).buildModel_tf()
self.neg_idx = tf.placeholder(tf.int32, name="neg_holder")
self.neg_item_embedding = tf.nn.embedding_lookup(self.V, self.neg_idx)
y = tf.reduce_sum(tf.multiply(self.user_embedding,self.item_embedding),1)\
-tf.reduce_sum(tf.multiply(self.user_embedding,self.neg_item_embedding),1)
loss = -tf.reduce_sum(tf.log(tf.sigmoid(y))) + self.regU * (tf.nn.l2_loss(self.user_embedding) +
tf.nn.l2_loss(self.item_embedding) +
tf.nn.l2_loss(self.neg_item_embedding))
opt = tf.train.AdamOptimizer(self.lRate)
train = opt.minimize(loss)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for iteration in range(self.maxIter):
for n,batch in enumerate(self.next_batch()):
user_idx, i_idx, j_idx = batch
_, l = sess.run([train, loss], feed_dict={self.u_idx: user_idx, self.neg_idx: j_idx,self.v_idx: i_idx})
print 'training:', iteration + 1, 'batch', n, 'loss:', l
self.P,self.Q = sess.run([self.U,self.V])
def predictForRanking(self, u):
'invoked to rank all the items for the user'
if self.data.containsUser(u):
u = self.data.getUserId(u)
return self.Q.dot(self.P[u])
else:
return [self.data.globalMean] * self.num_items
| gpl-3.0 |
yatinkumbhare/openstack-nova | tools/regression_tester.py | 101 | 3538 | #!/usr/bin/env python
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tool for checking if patch contains a regression test.
By default runs against current patch but can be set to use any gerrit review
as specified by change number (uses 'git review -d').
Idea: take tests from patch to check, and run against code from previous patch.
If new tests pass, then no regression test, if new tests fails against old code
then either
* new tests depend on new code and cannot confirm regression test is valid
(false positive)
* new tests detects the bug being fixed (detect valid regression test)
Due to the risk of false positives, the results from this need some human
interpretation.
"""
from __future__ import print_function
import optparse
import string
import subprocess
import sys
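# Run a shell command, echo it, and return its stdout; unless fail_ok is set, a non-zero exit status terminates the script with that status.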
def run(cmd, fail_ok=False):
print("running: %s" % cmd)
obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
obj.wait()
if obj.returncode != 0 and not fail_ok:
print("The above command terminated with an error.")
sys.exit(obj.returncode)
return obj.stdout.read()
def main():
usage = """
Tool for checking if a patch includes a regression test.
Usage: %prog [options]"""
parser = optparse.OptionParser(usage)
parser.add_option("-r", "--review", dest="review",
help="gerrit review number to test")
(options, args) = parser.parse_args()
if options.review:
original_branch = run("git rev-parse --abbrev-ref HEAD")
run("git review -d %s" % options.review)
else:
print ("no gerrit review number specified, running on latest commit"
"on current branch.")
test_works = False
# run new tests with old code
run("git checkout HEAD^ nova")
run("git checkout HEAD nova/tests")
# identify which tests have changed
tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
"| cut -f2").split()
test_list = []
for test in tests:
test_list.append(string.replace(test[0:-3], '/', '.'))
if test_list == []:
test_works = False
expect_failure = ""
else:
# run new tests, expect them to fail
expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
fail_ok=True)
if "FAILED (id=" in expect_failure:
test_works = True
# cleanup
run("git checkout HEAD nova")
if options.review:
new_branch = run("git status | head -1 | cut -d ' ' -f 4")
run("git checkout %s" % original_branch)
run("git branch -D %s" % new_branch)
print(expect_failure)
print("")
print("*******************************")
if test_works:
print("FOUND a regression test")
else:
print("NO regression test")
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 |
uni-peter-zheng/tp-qemu | qemu/tests/openflow_acl_test.py | 2 | 18485 | import logging
import re
import os
from autotest.client.shared import error
from autotest.client.shared import utils
from virttest import utils_net, utils_test, utils_misc
from virttest import aexpect
from virttest import remote
from virttest import data_dir
@error.context_aware
def run(test, params, env):
"""
Test Step:
1. Boot up guest using the openvswitch bridge
2. Set up the related service in the test environment (http, ftp, etc.) (optional)
3. Access the service in guest
4. Setup access control rules in ovs to disable the access
5. Access the service in guest
6. Setup access control rules in ovs to enable the access
7. Access the service in guest
8. Delete the access control rules in ovs
9. Access the service in guest
Params:
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def access_service(access_sys, access_targets, disabled, host_ip,
ref=False):
err_msg = ""
err_type = ""
for asys in access_sys:
for atgt in access_targets:
logging.debug("Try to access target %s from %s" % (atgt, asys))
access_params = access_sys[asys]
atgt_disabled = access_params['disabled_%s' % atgt]
if asys in vms_tags:
vm = env.get_vm(asys)
session = vm.wait_for_login(timeout=timeout)
run_func = session.cmd
remote_src = vm
ssh_src_ip = vm.get_address()
else:
run_func = utils.system_output
remote_src = "localhost"
ssh_src_ip = host_ip
if atgt in vms_tags:
vm = env.get_vm(atgt)
access_re_sub_string = vm.wait_for_get_address(0)
else:
access_re_sub_string = host_ip
access_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
access_params['access_cmd'])
ref_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
access_params['ref_cmd'])
if access_cmd in ["ssh", "telnet"]:
if atgt in vms_tags:
target_vm = env.get_vm(atgt)
target_ip = target_vm.get_address()
else:
target_vm = "localhost"
target_ip = host_ip
out = ""
out_err = ""
try:
out = remote_login(access_cmd, target_ip,
remote_src, params, host_ip)
stat = 0
except remote.LoginError, err:
stat = 1
out_err = "Failed to login %s " % atgt
out_err += "from %s, err: %s" % (asys, err.output)
try:
out += remote_login(access_cmd, ssh_src_ip,
target_vm, params, host_ip)
except remote.LoginError, err:
stat += 1
out_err += "Failed to login %s " % asys
out_err += "from %s, err: %s" % (atgt, err.output)
if out_err:
out = out_err
else:
try:
out = run_func(access_cmd, timeout=op_timeout)
stat = 0
check_string = access_params.get("check_from_output")
if check_string and check_string in out:
stat = 1
except (aexpect.ShellCmdError, error.CmdError,
aexpect.ShellTimeoutError), err:
if isinstance(err, error.CmdError):
out = err.result_obj.stderr
stat = err.result_obj.exit_status
else:
out = err.output
if isinstance(err, aexpect.ShellTimeoutError):
stat = 1
session.close()
session = vm.wait_for_login(timeout=timeout)
run_func = session.cmd
else:
stat = err.status
if access_params.get("clean_cmd"):
try:
run_func(access_params['clean_cmd'])
except Exception:
pass
if disabled and atgt_disabled and stat == 0:
err_msg += "Still can access %s after" % atgt
err_msg += " disable it from ovs. "
err_msg += "Command: %s. " % access_cmd
err_msg += "Output: %s" % out
if disabled and atgt_disabled and stat != 0:
logging.debug("Can not access target as expect.")
if not disabled and stat != 0:
if ref:
err_msg += "Can not access %s at the" % atgt
err_msg += " beginning. Please check your setup."
err_type = "ref"
else:
err_msg += "Still can not access %s" % atgt
err_msg += " after enable the access"
err_msg += "Command: %s. " % access_cmd
err_msg += "Output: %s" % out
if err_msg:
session.close()
if err_type == "ref":
raise error.TestNAError(err_msg)
raise error.TestFail(err_msg)
if not ref_cmd:
session.close()
return
try:
out = run_func(ref_cmd, timeout=op_timeout)
stat = 0
except (aexpect.ShellCmdError, error.CmdError,
aexpect.ShellTimeoutError), err:
if isinstance(err, error.CmdError):
out = err.result_obj.stderr
stat = err.result_obj.exit_status
else:
out = err.output
if isinstance(err, aexpect.ShellTimeoutError):
stat = 1
else:
stat = err.status
if stat != 0:
if ref:
err_msg += "Refernce command failed at beginning."
err_type = "ref"
else:
err_msg += "Refernce command failed after setup"
err_msg += " the rules"
err_msg += "Command: %s. " % ref_cmd
err_msg += "Output: %s" % out
if err_msg:
session.close()
if err_type == "ref":
raise error.TestNAError(err_msg)
raise error.TestFail(err_msg)
session.close()
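# Build the ovs-ofctl flow specification, e.g. "tcp,in_port=2,nw_dst=10.0.0.1,action=drop".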
def get_acl_cmd(protocol, in_port, action, extra_options):
acl_cmd = protocol.strip()
acl_cmd += ",in_port=%s" % in_port.strip()
if extra_options.strip():
acl_cmd += ",%s" % ",".join(extra_options.strip().split())
if action.strip():
acl_cmd += ",action=%s" % action.strip()
return acl_cmd
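# Verify that every field of the flow we just set up appears in one of the rules dumped by ovs-ofctl.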
def acl_rules_check(acl_rules, acl_setup_cmd):
acl_setup_cmd = re.sub("action=", "actions=", acl_setup_cmd)
acl_option = re.split(",", acl_setup_cmd)
for line in acl_rules.splitlines():
rule = [_.lower() for _ in re.split("[ ,]", line) if _]
item_in_rule = 0
for acl_item in acl_option:
if acl_item.lower() in rule:
item_in_rule += 1
if item_in_rule == len(acl_option):
return True
return False
def remote_login(client, host, src, params_login, host_ip):
src_name = src
if src != "localhost":
src_name = src.name
logging.info("Login %s from %s" % (host, src))
port = params_login["target_port"]
username = params_login["username"]
password = params_login["password"]
prompt = params_login["shell_prompt"]
linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n"))
quit_cmd = params.get("quit_cmd", "exit")
if host == host_ip:
# Try to login from guest to host.
prompt = "^\[.*\][\#\$]\s*$"
linesep = "\n"
username = params_login["host_username"]
password = params_login["host_password"]
quit_cmd = "exit"
if client == "ssh":
# We only support ssh for Linux in this test
cmd = ("ssh -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -p %s %s@%s" %
(port, username, host))
elif client == "telnet":
cmd = "telnet -l %s %s %s" % (username, host, port)
else:
raise remote.LoginBadClientError(client)
if src == "localhost":
logging.debug("Login with command %s" % cmd)
session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
else:
if params_login.get("os_type") == "windows":
if client == "telnet":
cmd = "C:\\telnet.py %s %s " % (host, username)
cmd += "%s \"%s\" && " % (password, prompt)
cmd += "C:\\wait_for_quit.py"
cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd
else:
cmd += " || sleep 5"
session = src.wait_for_login()
logging.debug("Sending login command: %s" % cmd)
session.sendline(cmd)
try:
out = remote.handle_prompts(session, username, password,
prompt, timeout, debug=True)
except Exception, err:
session.close()
raise err
try:
session.cmd(quit_cmd)
session.close()
except Exception:
pass
return out
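# Start the service (http, ftp, ...) used as the access target, either on the host or inside a guest.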
def setup_service(setup_target):
setup_timeout = int(params.get("setup_timeout", 360))
if setup_target == "localhost":
setup_func = utils.system_output
os_type = "linux"
else:
setup_vm = env.get_vm(setup_target)
setup_session = setup_vm.wait_for_login(timeout=timeout)
setup_func = setup_session.cmd
os_type = params["os_type"]
setup_params = params.object_params(os_type)
setup_cmd = setup_params.get("setup_cmd", "service SERVICE restart")
prepare_cmd = setup_params.get("prepare_cmd")
setup_cmd = re.sub("SERVICE", setup_params.get("service", ""),
setup_cmd)
error.context("Set up %s service in %s" % (setup_params.get("service"),
setup_target),
logging.info)
if prepare_cmd:
setup_func(prepare_cmd, timeout=setup_timeout)
setup_func(setup_cmd, timeout=setup_timeout)
if setup_target != "localhost":
setup_session.close()
def stop_service(setup_target):
setup_timeout = int(params.get("setup_timeout", 360))
if setup_target == "localhost":
setup_func = utils.system_output
os_type = "linux"
else:
setup_vm = env.get_vm(setup_target)
setup_session = setup_vm.wait_for_login(timeout=timeout)
setup_func = setup_session.cmd
os_type = params["os_type"]
setup_params = params.object_params(os_type)
stop_cmd = setup_params.get("stop_cmd", "service SERVICE stop")
cleanup_cmd = setup_params.get("cleanup_cmd")
stop_cmd = re.sub("SERVICE", setup_params.get("service", ""),
stop_cmd)
error.context("Stop %s service in %s" % (setup_params.get("service"),
setup_target),
logging.info)
if stop_cmd:
setup_func(stop_cmd, timeout=setup_timeout)
if cleanup_cmd:
setup_func(cleanup_cmd, timeout=setup_timeout)
if setup_target != "localhost":
setup_session.close()
timeout = int(params.get("login_timeout", '360'))
op_timeout = int(params.get("op_timeout", "360"))
acl_protocol = params['acl_protocol']
acl_extra_options = params.get("acl_extra_options", "")
for vm in env.get_all_vms():
session = vm.wait_for_login(timeout=timeout)
if params.get("disable_iptables") == "yes":
session.cmd("iptables -F")
#session.cmd_status_output("service iptables stop")
if params.get("copy_scripts"):
root_dir = data_dir.get_root_dir()
script_dir = os.path.join(root_dir, "shared", "scripts")
tmp_dir = params.get("tmp_dir", "C:\\")
for script in params.get("copy_scripts").split():
script_path = os.path.join(script_dir, script)
vm.copy_files_to(script_path, tmp_dir)
session.close()
vms_tags = params.objects("vms")
br_name = params.get("netdst")
if br_name == "private":
br_name = params.get("priv_brname", 'autotest-prbr0')
for setup_target in params.get("setup_targets", "").split():
setup_service(setup_target)
access_targets = params.get("access_targets", "localhost").split()
deny_target = params.get("deny_target", "localhost")
all_target = params.get("extra_target", "").split() + vms_tags
target_port = params["target_port"]
vm = env.get_vm(vms_tags[0])
nic = vm.virtnet[0]
if_name = nic.ifname
params_nic = params.object_params("nic1")
if params["netdst"] == "private":
params_nic["netdst"] = params_nic.get("priv_brname", "atbr0")
host_ip = utils_net.get_host_ip_address(params_nic)
if deny_target in vms_tags:
deny_vm = env.get_vm(deny_target)
deny_vm_ip = deny_vm.wait_for_get_address(0)
elif deny_target == "localhost":
deny_vm_ip = host_ip
if "NW_DST" in acl_extra_options:
acl_extra_options = re.sub("NW_DST", deny_vm_ip, acl_extra_options)
acl_extra_options = re.sub("TARGET_PORT", target_port, acl_extra_options)
access_sys = {}
for target in all_target:
if target not in access_targets:
if target in vms_tags:
os_type = params["os_type"]
else:
os_type = "linux"
os_params = params.object_params(os_type)
access_param = os_params.object_params(target)
check_from_output = access_param.get("check_from_output")
access_sys[target] = {}
access_sys[target]['access_cmd'] = access_param['access_cmd']
access_sys[target]['ref_cmd'] = access_param.get('ref_cmd', "")
access_sys[target]['clean_cmd'] = access_param.get('clean_guest',
"")
if check_from_output:
access_sys[target]['check_from_output'] = check_from_output
for tgt in access_targets:
tgt_param = access_param.object_params(tgt)
acl_disabled = tgt_param.get("acl_disabled") == "yes"
access_sys[target]['disabled_%s' % tgt] = acl_disabled
error.context("Try to access target before setup the rules", logging.info)
access_service(access_sys, access_targets, False, host_ip, ref=True)
error.context("Disable the access in ovs", logging.info)
br_infos = utils_net.openflow_manager(br_name, "show").stdout
if_port = re.findall("(\d+)\(%s\)" % if_name, br_infos)
if not if_port:
raise error.TestNAError("Can not find %s in bridge %s" % (if_name,
br_name))
if_port = if_port[0]
acl_cmd = get_acl_cmd(acl_protocol, if_port, "drop", acl_extra_options)
utils_net.openflow_manager(br_name, "add-flow", acl_cmd)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if not acl_rules_check(acl_rules, acl_cmd):
raise error.TestFail("Can not find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Try to acess target to exam the disable rules",
logging.info)
access_service(access_sys, access_targets, True, host_ip)
error.context("Enable the access in ovs", logging.info)
acl_cmd = get_acl_cmd(acl_protocol, if_port, "normal", acl_extra_options)
utils_net.openflow_manager(br_name, "mod-flows", acl_cmd)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if not acl_rules_check(acl_rules, acl_cmd):
raise error.TestFail("Can not find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Try to acess target to exam the enable rules",
logging.info)
access_service(access_sys, access_targets, False, host_ip)
error.context("Delete the access rules in ovs", logging.info)
acl_cmd = get_acl_cmd(acl_protocol, if_port, "", acl_extra_options)
utils_net.openflow_manager(br_name, "del-flows", acl_cmd)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if acl_rules_check(acl_rules, acl_cmd):
raise error.TestFail("Still can find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Try to acess target to exam after delete the rules",
logging.info)
access_service(access_sys, access_targets, False, host_ip)
for setup_target in params.get("setup_targets", "").split():
stop_service(setup_target)
| gpl-2.0 |
HLFH/CouchPotatoServer | libs/suds/builder.py | 197 | 4220 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{builder} module provides a wsdl/xsd defined types factory
"""
from logging import getLogger
from suds import *
from suds.sudsobject import Factory
log = getLogger(__name__)
class Builder:
""" Builder used to construct an object for types defined in the schema """
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
def build(self, name):
""" build a an object for the specified typename as defined in the schema """
if isinstance(name, basestring):
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
else:
type = name
cls = type.name
if type.mixed():
data = Factory.property(cls)
else:
data = Factory.object(cls)
resolved = type.resolve()
md = data.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
history = []
self.add_attributes(data, resolved)
for child, ancestry in type.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
return data
def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.unbounded():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False
def ordering(self, type):
""" get the ordering """
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
| gpl-3.0 |
ashutosh-mishra/youtube-dl | youtube_dl/extractor/jeuxvideo.py | 1 | 1985 | # coding: utf-8
import json
import re
import xml.etree.ElementTree
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
_VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm'
_TEST = {
u'url': u'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
u'file': u'5182.mp4',
u'md5': u'046e491afb32a8aaac1f44dd4ddd54ee',
u'info_dict': {
u'title': u'GC 2013 : Tearaway nous présente ses papiers d\'identité',
u'description': u'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
xml_link = self._html_search_regex(
r'<param name="flashvars" value="config=(.*?)" />',
webpage, u'config URL')
video_id = self._search_regex(
r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
xml_link, u'video ID')
xml_config = self._download_webpage(
xml_link, title, u'Downloading XML config')
config = xml.etree.ElementTree.fromstring(xml_config.encode('utf-8'))
info_json = self._search_regex(
r'(?sm)<format\.json>(.*?)</format\.json>',
xml_config, u'JSON information')
info = json.loads(info_json)['versions'][0]
video_url = 'http://video720.jeuxvideo.com/' + info['file']
return {
'id': video_id,
'title': config.find('titre_video').text,
'ext': 'mp4',
'url': video_url,
'description': self._og_search_description(webpage),
'thumbnail': config.find('image').text,
}
| unlicense |
getnikola/plugins | v7/mistune/mistune.py | 1 | 3824 | # -*- coding: utf-8 -*-
# Copyright © 2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on Mistune."""
from __future__ import unicode_literals
import codecs
import os
import re
try:
import mistune
except ImportError:
mistune = None # NOQA
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # NOQA
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, write_metadata
class CompileMistune(PageCompiler):
"""Compile Markdown into HTML using Mistune."""
name = "mistune"
demote_headers = True
def __init__(self, *args, **kwargs):
super(CompileMistune, self).__init__(*args, **kwargs)
if mistune is not None:
self.parser = mistune.Markdown()
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
if mistune is None:
req_missing(['mistune'], 'build this site (compile with mistune)')
makedirs(os.path.dirname(dest))
with codecs.open(dest, "w+", "utf8") as out_file:
with codecs.open(source, "r", "utf8") as in_file:
data = in_file.read()
if not is_two_file:
data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
output = self.parser(data)
output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))
out_file.write(output)
if post is None:
if shortcode_deps:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += shortcode_deps
def compile_html(self, source, dest, is_two_file=True):
"""Compile the post into HTML (deprecated API)."""
try:
post = self.site.post_per_input_file[source]
except KeyError:
post = None
return self.compile(source, dest, is_two_file, post, None)
def create_post(self, path, **kw):
content = kw.pop('content', 'Write your post here.')
onefile = kw.pop('onefile', False)
kw.pop('is_page', False)
metadata = OrderedDict()
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with codecs.open(path, "wb+", "utf8") as fd:
if onefile:
fd.write('<!-- \n')
fd.write(write_metadata(metadata))
fd.write('-->\n\n')
fd.write(content)
| mit |
RNAer/qiita | qiita_db/reference.py | 2 | 6342 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from os.path import join
from .base import QiitaObject
from .exceptions import QiitaDBDuplicateError
from .util import (insert_filepaths, convert_to_id,
get_mountpoint)
from .sql_connection import SQLConnectionHandler
class Reference(QiitaObject):
r"""Object to interact with reference sequence databases
Attributes
----------
sequence_fp
taxonomy_fp
tree_fp
Methods
-------
create
exists
See Also
--------
QiitaObject
"""
_table = "reference"
@classmethod
def create(cls, name, version, seqs_fp, tax_fp=None, tree_fp=None):
r"""Creates a new reference object with a new id on the storage system
Parameters
----------
name : str
The name of the reference database
version : str
The version of the reference database
seqs_fp : str
The path to the reference sequence file
tax_fp : str, optional
The path to the reference taxonomy file
tree_fp : str, optional
The path to the reference tree file
Returns
-------
A new instance of `cls` to access to the Reference stored in the DB
Raises
------
QiitaDBDuplicateError
If the reference database with name `name` and version `version`
already exists on the system
"""
if cls.exists(name, version):
raise QiitaDBDuplicateError("Reference",
"Name: %s, Version: %s"
% (name, version))
conn_handler = SQLConnectionHandler()
seq_id = insert_filepaths([(seqs_fp, convert_to_id("reference_seqs",
"filepath_type",
conn_handler))],
"%s_%s" % (name, version), "reference",
"filepath", conn_handler)[0]
# Check if the database has taxonomy file
tax_id = None
if tax_fp:
fps = [(tax_fp, convert_to_id("reference_tax", "filepath_type",
conn_handler))]
tax_id = insert_filepaths(fps, "%s_%s" % (name, version),
"reference", "filepath", conn_handler)[0]
# Check if the database has tree file
tree_id = None
if tree_fp:
fps = [(tree_fp, convert_to_id("reference_tree", "filepath_type",
conn_handler))]
tree_id = insert_filepaths(fps, "%s_%s" % (name, version),
"reference", "filepath",
conn_handler)[0]
# Insert the actual object to the db
ref_id = conn_handler.execute_fetchone(
"INSERT INTO qiita.{0} (reference_name, reference_version, "
"sequence_filepath, taxonomy_filepath, tree_filepath) VALUES "
"(%s, %s, %s, %s, %s) RETURNING reference_id".format(cls._table),
(name, version, seq_id, tax_id, tree_id))[0]
return cls(ref_id)
@classmethod
def exists(cls, name, version):
r"""Checks if a given object info is already present on the DB
Parameters
----------
name : str
The name of the reference database
version : str
The version of the reference database
Raises
------
QiitaDBNotImplementedError
If the method is not overwritten by a subclass
"""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.{0} WHERE "
"reference_name=%s AND reference_version=%s)".format(cls._table),
(name, version))[0]
@property
def name(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT reference_name FROM qiita.{0} WHERE "
"reference_id = %s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
@property
def version(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT reference_version FROM qiita.{0} WHERE "
"reference_id = %s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
@property
def sequence_fp(self):
conn_handler = SQLConnectionHandler()
rel_path = conn_handler.execute_fetchone(
"SELECT f.filepath FROM qiita.filepath f JOIN qiita.{0} r ON "
"r.sequence_filepath=f.filepath_id WHERE "
"r.reference_id=%s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
return join(basefp, rel_path)
@property
def taxonomy_fp(self):
conn_handler = SQLConnectionHandler()
rel_path = conn_handler.execute_fetchone(
"SELECT f.filepath FROM qiita.filepath f JOIN qiita.{0} r ON "
"r.taxonomy_filepath=f.filepath_id WHERE "
"r.reference_id=%s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
return join(basefp, rel_path)
@property
def tree_fp(self):
conn_handler = SQLConnectionHandler()
rel_path = conn_handler.execute_fetchone(
"SELECT f.filepath FROM qiita.filepath f JOIN qiita.{0} r ON "
"r.tree_filepath=f.filepath_id WHERE "
"r.reference_id=%s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
return join(basefp, rel_path)
| bsd-3-clause |
Curso-OpenShift/Formulario | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/conf/locale/nb/formats.py | 504 | 1766 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| gpl-3.0 |
AthinaB/synnefo | snf-astakos-app/astakos/api/user.py | 4 | 11741 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import wraps, partial
from django.views.decorators.csrf import csrf_exempt
from django import http
from astakos.im import transaction
from django.utils import simplejson as json
from django.forms.models import model_to_dict
from django.core.validators import validate_email, ValidationError
from snf_django.lib import api
from snf_django.lib.api import faults
from .util import (
get_uuid_displayname_catalogs as get_uuid_displayname_catalogs_util,
send_feedback as send_feedback_util,
user_from_token)
from astakos.im import settings
from astakos.admin import stats
from astakos.im.models import AstakosUser, get_latest_terms
from astakos.im.auth import make_local_user
from astakos.im import activation_backends
ADMIN_GROUPS = settings.ADMIN_API_PERMITTED_GROUPS
activation_backend = activation_backends.get_backend()
logger = logging.getLogger(__name__)
@csrf_exempt
@api.api_method(http_method="POST", token_required=True, user_required=False,
logger=logger)
@user_from_token # Authenticate user!!
def get_uuid_displayname_catalogs(request):
# Normal Response Codes: 200
# Error Response Codes: internalServerError (500)
# badRequest (400)
# unauthorised (401)
return get_uuid_displayname_catalogs_util(request)
@csrf_exempt
@api.api_method(http_method="POST", token_required=True, user_required=False,
logger=logger)
@user_from_token # Authenticate user!!
def send_feedback(request, email_template_name='im/feedback_mail.txt'):
# Normal Response Codes: 200
# Error Response Codes: internalServerError (500)
# badRequest (400)
# unauthorised (401)
return send_feedback_util(request, email_template_name)
# API ADMIN UTILS AND ENDPOINTS
def user_api_method(http_method):
"""
Common decorator for user admin api views.
"""
def wrapper(func):
@api.api_method(http_method=http_method, user_required=True,
token_required=True, logger=logger,
serializations=['json'])
@api.user_in_groups(permitted_groups=ADMIN_GROUPS,
logger=logger)
@wraps(func)
def method(*args, **kwargs):
return func(*args, **kwargs)
return method
return wrapper
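# Serialize an AstakosUser, its dates, roles and auth providers into the dict returned by the admin API.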
def user_to_dict(user, detail=True):
user_fields = ['first_name', 'last_name', 'email']
date_fields = ['date_joined', 'moderated_at', 'verified_at',
'auth_token_expires']
status_fields = ['is_active', 'is_rejected', 'deactivated_reason',
'accepted_policy', 'rejected_reason']
if not detail:
fields = user_fields
date_fields = []
d = model_to_dict(user, fields=user_fields + status_fields)
d['id'] = user.uuid
for date_field in date_fields:
val = getattr(user, date_field)
if val:
d[date_field] = api.utils.isoformat(getattr(user, date_field))
else:
d[date_field] = None
methods = d['authentication_methods'] = []
d['roles'] = list(user.groups.values_list("name", flat=True))
for provider in user.auth_providers.filter():
method_fields = ['identifier', 'active', 'affiliation']
method = model_to_dict(provider, fields=method_fields)
method['backend'] = provider.auth_backend
method['metadata'] = provider.info
if provider.auth_backend == 'astakos':
method['identifier'] = user.email
methods.append(method)
return d
def users_demux(request):
if request.method == 'GET':
return users_list(request)
elif request.method == 'POST':
return users_create(request)
else:
return api.api_method_not_allowed(request)
def user_demux(request, user_id):
if request.method == 'GET':
return user_detail(request, user_id)
elif request.method == 'PUT':
return user_update(request, user_id)
else:
return api.api_method_not_allowed(request)
@user_api_method('GET')
def users_list(request, action='list', detail=False):
logger.debug('users_list detail=%s', detail)
users = AstakosUser.objects.filter()
dict_func = partial(user_to_dict, detail=detail)
users_dicts = map(dict_func, users)
data = json.dumps({'users': users_dicts})
return http.HttpResponse(data, status=200,
content_type='application/json')
@user_api_method('POST')
@transaction.commit_on_success
def users_create(request):
user_id = request.user_uniq
req = api.utils.get_json_body(request)
logger.info('users_create: %s request: %s', user_id, req)
user_data = req.get('user', {})
email = user_data.get('username', None)
first_name = user_data.get('first_name', None)
last_name = user_data.get('last_name', None)
affiliation = user_data.get('affiliation', None)
password = user_data.get('password', None)
metadata = user_data.get('metadata', {})
password_gen = AstakosUser.objects.make_random_password
if not password:
password = password_gen()
try:
validate_email(email)
except ValidationError:
raise faults.BadRequest("Invalid username (email format required)")
if AstakosUser.objects.verified_user_exists(email):
raise faults.Conflict("User '%s' already exists" % email)
if not first_name:
raise faults.BadRequest("Invalid first_name")
if not last_name:
raise faults.BadRequest("Invalid last_name")
has_signed_terms = not(get_latest_terms())
try:
user = make_local_user(email, first_name=first_name,
last_name=last_name, password=password,
has_signed_terms=has_signed_terms)
if metadata:
# we expect a unique local auth provider for the user
provider = user.auth_providers.get()
provider.info = metadata
provider.affiliation = affiliation
provider.save()
user = AstakosUser.objects.get(pk=user.pk)
code = user.verification_code
ver_res = activation_backend.handle_verification(user, code)
if ver_res.is_error():
raise Exception(ver_res.message)
mod_res = activation_backend.handle_moderation(user, accept=True)
if mod_res.is_error():
raise Exception(ver_res.message)
except Exception, e:
raise faults.BadRequest(e.message)
user_data = {
'id': user.uuid,
'password': password,
'auth_token': user.auth_token,
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200, content_type='application/json')
@user_api_method('POST')
@transaction.commit_on_success
def user_action(request, user_id):
admin_id = request.user_uniq
req = api.utils.get_json_body(request)
logger.info('user_action: %s user: %s request: %s', admin_id, user_id, req)
if 'activate' in req:
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
activation_backend.activate_user(user)
user = AstakosUser.objects.get(uuid=user_id)
user_data = {
'id': user.uuid,
'is_active': user.is_active
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200,
content_type='application/json')
if 'deactivate' in req:
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
activation_backend.deactivate_user(
user, reason=req['deactivate'].get('reason', None))
user = AstakosUser.objects.get(uuid=user_id)
user_data = {
'id': user.uuid,
'is_active': user.is_active
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200,
content_type='application/json')
if 'renewToken' in req:
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
user.renew_token()
user.save()
user_data = {
'id': user.uuid,
'auth_token': user.auth_token,
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200,
content_type='application/json')
raise faults.BadRequest("Invalid action")
@user_api_method('PUT')
@transaction.commit_on_success
def user_update(request, user_id):
admin_id = request.user_uniq
req = api.utils.get_json_body(request)
logger.info('user_update: %s user: %s request: %s', admin_id, user_id, req)
user_data = req.get('user', {})
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
email = user_data.get('username', None)
first_name = user_data.get('first_name', None)
last_name = user_data.get('last_name', None)
affiliation = user_data.get('affiliation', None)
password = user_data.get('password', None)
metadata = user_data.get('metadata', {})
if 'password' in user_data:
user.set_password(password)
if 'username' in user_data:
try:
validate_email(email)
except ValidationError:
raise faults.BadRequest("Invalid username (email format required)")
if AstakosUser.objects.verified_user_exists(email):
raise faults.Conflict("User '%s' already exists" % email)
user.email = email
if 'first_name' in user_data:
user.first_name = first_name
if 'last_name' in user_data:
user.last_name = last_name
try:
user.save()
if 'metadata' in user_data:
provider = user.auth_providers.get(auth_backend="astakos")
provider.info = metadata
if 'affiliation' in user_data:
provider.affiliation = affiliation
provider.save()
except Exception, e:
raise faults.BadRequest(e.message)
data = json.dumps({'user': user_to_dict(user)})
return http.HttpResponse(data, status=200, content_type='application/json')
@user_api_method('GET')
def user_detail(request, user_id):
admin_id = request.user_uniq
logger.info('user_detail: %s user: %s', admin_id, user_id)
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
user_data = user_to_dict(user, detail=True)
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200, content_type='application/json')
| gpl-3.0 |
adw0rd/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/transactions/tests_25.py | 51 | 5306 | from __future__ import with_statement
from django.db import connection, transaction, IntegrityError
from django.test import TransactionTestCase, skipUnlessDBFeature
from models import Reporter
class TransactionContextManagerTests(TransactionTestCase):
def create_reporter_and_fail(self):
Reporter.objects.create(first_name="Bob", last_name="Holtzman")
raise Exception
@skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
"""
The default behavior is to autocommit after each save() action.
"""
with self.assertRaises(Exception):
self.create_reporter_and_fail()
# The object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager(self):
"""
The autocommit context manager works exactly the same as the default
behavior.
"""
with self.assertRaises(Exception):
with transaction.autocommit():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager_with_using(self):
"""
The autocommit context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.autocommit(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
"""
With the commit_on_success context manager, the transaction is only
committed if the block doesn't throw an exception.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
"""
The commit_on_success context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
"""
If there aren't any exceptions, the data will get saved.
"""
Reporter.objects.create(first_name="Alice", last_name="Smith")
with transaction.commit_on_success():
Reporter.objects.filter(first_name="Alice").delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_exit(self):
with transaction.autocommit():
with transaction.commit_on_success():
Reporter.objects.create(first_name="Bobby", last_name="Tables")
# Much more formal
r = Reporter.objects.get()
r.first_name = "Robert"
r.save()
r = Reporter.objects.get()
self.assertEqual(r.first_name, "Robert")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
"""
You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.
"""
with transaction.commit_manually():
Reporter.objects.create(first_name="Libby", last_name="Holtzman")
transaction.commit()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
"""
If you forget, you'll get bad errors.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually():
Reporter.objects.create(first_name="Scott", last_name="Browning")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
"""
The commit_manually function also works with a using argument.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually(using="default"):
Reporter.objects.create(first_name="Walter", last_name="Cronkite")
@skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
"""
Regression for #11900: If a block wrapped by commit_on_success
writes a transaction that can't be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.
"""
with self.assertRaises(IntegrityError):
with transaction.commit_on_success():
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
transaction.set_dirty()
transaction.rollback()
| gpl-3.0 |
wpoa/wiki-imports | lib/python2.7/site-packages/mwparserfromhell/nodes/extras/__init__.py | 1 | 1389 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This package contains objects used by
:py:class:`~.Node`\ s but which are not nodes themselves. This includes the
parameters of Templates or the attributes of HTML tags.
"""
from .attribute import Attribute
from .parameter import Parameter
| gpl-3.0 |
botswana-harvard/edc-pharma | edc_pharmacy/old/dispense/prescription_creator.py | 1 | 3055 | from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from ..medications import medications
from ..models import Prescription, MedicationDefinition
class PrescriptionCreator:
"""Creates all prescription records after completing patient history model.
"""
def __init__(self, appointment=None, selected=None, medication_name=None,
options=None):
self.medication_name = medication_name
self.appointment = appointment
self.selected = selected
self.options = options
self.save_or_update()
def create_all(self):
"""Create prescription record per medication and update the next
refill datetime.
"""
if self.selected:
medication_definition = medications.get(self.medication_name)
self.create_history(medication_definition=medication_definition)
else:
for medication_definition in self.appointment.profile_medications:
self.create_prescription(
medication_definition=medication_definition)
self.appointment.update_next_dispense_datetime()
def medication(self, medication_definition=None):
try:
medication_obj = MedicationDefinition.objects.get(
name=medication_definition.name)
except ObjectDoesNotExist:
medication_obj = MedicationDefinition.objects.create(
name=medication_definition.name,
unit=medication_definition.unit,
category=medication_definition.category,
description=medication_definition.description,
single_dose=medication_definition.single_dose,
use_body_weight=medication_definition.use_body_weight,
milligram=medication_definition.milligram,
strength=medication_definition.strength)
return medication_obj
def create_prescription(self, medication_definition=None):
medication_obj = self.medication(
medication_definition=medication_definition)
model_obj = self.prescription(
medication_definition=medication_obj)
model_obj.save()
def save_or_update(self):
self.create_all()
self.appointment.save()
def prescription(self, medication_definition=None):
try:
prescription = Prescription.objects.get(
appointment=self.appointment,
medication_definition=medication_definition)
except Prescription.DoesNotExist:
prescription = Prescription.objects.create(
appointment=self.appointment,
dispense_datetime=datetime.today(),
medication_definition=medication_definition,
subject_identifier=self.appointment.subject_identifier,
medication_description=medication_definition.description,
category=medication_definition.category,
** self.options)
return prescription
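# Illustrative usage sketch (not part of the original module): creating the
# prescriptions for one selected medication after an appointment. The
# `appointment` object and the medication key are assumptions for illustration;
# the appointment is expected to expose `profile_medications`,
# `update_next_dispense_datetime()`, `subject_identifier` and `save()`.
def example_dispense(appointment):
    # Instantiation triggers save_or_update(), which creates the Prescription
    # rows and advances the appointment's next dispense datetime.
    return PrescriptionCreator(
        appointment=appointment,
        selected=True,
        medication_name='ambisome',  # hypothetical key in `medications`
        options={})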
| gpl-2.0 |
alabid/pycrunchbase | tests/test_node.py | 3 | 1070 | from unittest import TestCase
import json
from pycrunchbase.resource.node import Node
from pycrunchbase.resource.utils import parse_date
class TestNode(Node):
KNOWN_PROPERTIES = ['property1', 'property2']
def _coerce_values(self):
# intentionally coerce bad values for test purposes
attr = 'property1'
if getattr(self, attr, None):
setattr(self, attr, parse_date(getattr(self, attr)))
data = {
"type": "TestNode",
"uuid": "uuid",
'properties': {
'property1': 'one',
'property2': 'two'
},
'relationships': {
'unknown': {
'paging': {},
'items': {}
}
},
}
class NodeTestCase(TestCase):
def test_node_creation_from_dict(self):
node = TestNode(data)
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
def test_node_creation_from_string(self):
node = TestNode(json.dumps(data))
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
| mit |
b0ttl3z/SickRage | lib/synchronousdeluge/transfer.py | 114 | 1346 | import zlib
import struct
import socket
import ssl
from synchronousdeluge import rencode
__all__ = ["DelugeTransfer"]
class DelugeTransfer(object):
def __init__(self):
self.sock = None
self.conn = None
self.connected = False
def connect(self, hostport):
if self.connected:
self.disconnect()
self.sock = socket.create_connection(hostport)
self.conn = ssl.wrap_socket(self.sock, None, None, False, ssl.CERT_NONE, ssl.PROTOCOL_TLSv1)
self.connected = True
def disconnect(self):
if self.conn:
self.conn.close()
self.connected = False
def send_request(self, request):
data = (request.format(),)
payload = zlib.compress(rencode.dumps(data))
self.conn.sendall(payload)
buf = b""
while True:
data = self.conn.recv(1024)
if not data:
self.connected = False
break
buf += data
dobj = zlib.decompressobj()
try:
message = rencode.loads(dobj.decompress(buf))
except (ValueError, zlib.error, struct.error):
# Probably incomplete data, read more
continue
else:
buf = dobj.unused_data
yield message
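# Illustrative usage sketch (not part of the original module): connecting to a
# Deluge daemon and reading the replies for a single request. Host, port and
# the `request` object are assumptions for illustration; `request` must expose
# the `format()` method used by send_request() above.
def example_call(request, host='127.0.0.1', port=58846):
    transfer = DelugeTransfer()
    transfer.connect((host, port))
    try:
        # send_request() is a generator that yields each rencoded message the
        # daemon sends back until the connection is closed.
        for message in transfer.send_request(request):
            return message
    finally:
        transfer.disconnect()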
| gpl-3.0 |
jymannob/CouchPotatoServer | libs/subliminal/services/thesubdb.py | 107 | 2775 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie, UnknownVideo
import logging
logger = logging.getLogger(__name__)
class TheSubDB(ServiceBase):
server_url = 'http://api.thesubdb.com'
user_agent = 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'
api_based = True
# Source: http://api.thesubdb.com/?action=languages
languages = language_set(['af', 'cs', 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'id', 'it',
'la', 'nl', 'no', 'oc', 'pl', 'pt', 'ro', 'ru', 'sl', 'sr', 'sv',
'tr'])
videos = [Movie, Episode, UnknownVideo]
require_video = True
def list_checked(self, video, languages):
return self.query(video.path, video.hashes['TheSubDB'], languages)
def query(self, filepath, moviehash, languages):
r = self.session.get(self.server_url, params={'action': 'search', 'hash': moviehash})
if r.status_code == 404:
logger.debug(u'Could not find subtitles for hash %s' % moviehash)
return []
if r.status_code != 200:
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
available_languages = language_set(r.content.split(','))
languages &= available_languages
if not languages:
logger.debug(u'Could not find subtitles for hash %s with languages %r (only %r available)' % (moviehash, languages, available_languages))
return []
subtitles = []
for language in languages:
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s?action=download&hash=%s&language=%s' % (self.server_url, moviehash, language.alpha2))
subtitles.append(subtitle)
return subtitles
Service = TheSubDB
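# Illustrative helper (not part of the original module): subliminal passes the
# precomputed hash in via `video.hashes['TheSubDB']`. For reference, TheSubDB's
# published hash is understood to be the MD5 of the first and last 64 KiB of
# the video file; treat this sketch as an assumption, not the canonical
# implementation, and note it requires the file to be at least 64 KiB long.
def example_thesubdb_hash(path, readsize=64 * 1024):
    import hashlib
    import os
    with open(path, 'rb') as f:
        data = f.read(readsize)
        f.seek(-readsize, os.SEEK_END)
        data += f.read(readsize)
    return hashlib.md5(data).hexdigest()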
| gpl-3.0 |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
inetCatapult/troposphere | troposphere/policies.py | 20 | 1033 | from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
props = {
'MaxBatchSize': (positive_integer, False),
'MinInstancesInService': (integer, False),
'PauseTime': (validate_pausetime, False),
'SuspendProcesses': ([basestring], False),
'WaitOnResourceSignals': (boolean, False),
}
class AutoScalingScheduledAction(AWSProperty):
props = {
'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
}
class UpdatePolicy(AWSAttribute):
props = {
'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
}
class ResourceSignal(AWSProperty):
props = {
'Count': (positive_integer, False),
'Timeout': (validate_pausetime, False),
}
class CreationPolicy(AWSAttribute):
props = {
'ResourceSignal': (ResourceSignal, True),
}
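# Illustrative usage sketch (not part of the original module): these attribute
# classes are normally attached to a CloudFormation resource (for example an
# AutoScalingGroup); the wiring to a concrete resource is an assumption for
# illustration, only the policy objects themselves come from this module.
def example_policies():
    creation = CreationPolicy(
        ResourceSignal=ResourceSignal(Count=1, Timeout='PT15M'))
    update = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            MinInstancesInService=1,
            MaxBatchSize=1,
            PauseTime='PT5M',
            WaitOnResourceSignals=True))
    return creation, update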
| bsd-2-clause |
cabe56/bookmarks | vendor/requests/models.py | 277 | 26436 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import DecodeError
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError, ContentDecodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_moved, # 307
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
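# Illustrative usage sketch (not part of the original module), tying the
# classes above together the way their docstrings describe: build a Request,
# prepare it, send it through a Session and inspect the Response. The URL is a
# placeholder and the top-level `requests` package is assumed to be installed.
def example_roundtrip():
    import requests
    req = Request(method='GET', url='http://httpbin.org/get',
                  params={'key': 'value'})
    prepared = req.prepare()       # -> PreparedRequest with final URL and body
    session = requests.Session()
    resp = session.send(prepared)  # -> Response
    resp.raise_for_status()        # raises HTTPError on 4xx/5xx status codes
    return resp.json()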
| gpl-2.0 |
bakerlover/lab5 | main/admin.py | 7 | 2264 | # -*- coding: utf-8 -*-
from flask.ext import wtf
from google.appengine.api import app_identity
import flask
import auth
import util
import model
import config
from main import app
class ConfigUpdateForm(wtf.Form):
analytics_id = wtf.StringField('Analytics ID', filters=[util.strip_filter])
announcement_html = wtf.TextAreaField('Announcement HTML', filters=[util.strip_filter])
announcement_type = wtf.SelectField('Announcement Type', choices=[(t, t.title()) for t in model.Config.announcement_type._choices])
brand_name = wtf.StringField('Brand Name', [wtf.validators.required()], filters=[util.strip_filter])
facebook_app_id = wtf.StringField('Facebook App ID', filters=[util.strip_filter])
facebook_app_secret = wtf.StringField('Facebook App Secret', filters=[util.strip_filter])
feedback_email = wtf.StringField('Feedback Email', [wtf.validators.optional(), wtf.validators.email()], filters=[util.email_filter])
flask_secret_key = wtf.StringField('Flask Secret Key', [wtf.validators.required()], filters=[util.strip_filter])
twitter_consumer_key = wtf.StringField('Twitter Consumer Key', filters=[util.strip_filter])
twitter_consumer_secret = wtf.StringField('Twitter Consumer Secret', filters=[util.strip_filter])
@app.route('/_s/admin/config/', endpoint='admin_config_update_service')
@app.route('/admin/config/', methods=['GET', 'POST'])
@auth.admin_required
def admin_config_update():
config_db = model.Config.get_master_db()
form = ConfigUpdateForm(obj=config_db)
if form.validate_on_submit():
form.populate_obj(config_db)
config_db.put()
reload(config)
app.config.update(CONFIG_DB=config_db)
return flask.redirect(flask.url_for('welcome'))
if flask.request.path.startswith('/_s/'):
return util.jsonify_model_db(config_db)
instances_url = None
if config.PRODUCTION:
instances_url = '%s?app_id=%s&version_id=%s' % (
'https://appengine.google.com/instances',
app_identity.get_application_id(),
config.CURRENT_VERSION_ID,
)
return flask.render_template(
'admin/config_update.html',
title='Admin Config',
html_class='admin-config',
form=form,
config_db=config_db,
instances_url=instances_url,
has_json=True,
)
| mit |
tensorflow/federated | tensorflow_federated/python/simulation/baselines/cifar100/__init__.py | 1 | 1147 | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for constructing baseline tasks for the CIFAR-100 dataset."""
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import create_image_classification_task
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import DEFAULT_CROP_HEIGHT
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import DEFAULT_CROP_WIDTH
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import ResnetModel
| apache-2.0 |
kk7ds/luvs | unifi_stream_server.py | 1 | 8384 | import asyncio
import aiohttp
from aiohttp import web
import logging
from logging import handlers
import signal
import socket
import time
import unifi_ws_server
class StreamerContext(object):
pass
class RequestHandler(aiohttp.server.ServerHttpProtocol):
def __init__(self, **kwargs):
self._log = kwargs.pop('log')
super(RequestHandler, self).__init__(**kwargs)
def _do_stream(self, message, payload, camera_mac, stream):
response = aiohttp.Response(self.writer, 200,
http_version=message.version)
try:
self._context = yield from controller.stream_camera(camera_mac,
stream,
response)
except NoSuchCamera:
response = aiohttp.Response(self.writer, 404)
response.send_headers()
response.write_eof()
return
except CameraInUse:
response = aiohttp.Response(self.writer, 409)
response.send_headers()
response.write_eof()
return
while (self._context.streaming
and controller.ws_server.is_camera_managed(camera_mac)):
yield from asyncio.sleep(1)
self._log.debug('Closing HTTP streaming connection for %s' % camera_mac)
response.write_eof()
self._context.controller.streaming_stopped(self._context)
def connection_lost(self, exc):
self._context.controller.streaming_stopped(self._context)
super(RequestHandler, self).connection_lost(exc)
@asyncio.coroutine
def handle_request(self, message, payload):
self._log.debug('GET %s' % message.path)
path_elements = message.path.split('/')
self._log.debug('Path: %s' % path_elements)
if len(path_elements) == 4 and path_elements[1] == 'stream':
camera_mac = path_elements[2]
stream = path_elements[3]
self._log.debug('Requested stream %s for %s' % (stream,
camera_mac))
yield from self._do_stream(message, payload, camera_mac, stream)
else:
response = aiohttp.Response(self.writer, 403)
response.send_headers()
response.write_eof()
class Streamer(asyncio.Protocol):
def __init__(self):
super(Streamer, self).__init__()
@classmethod
def factory(cls, context):
def make_thing():
instance = cls()
instance._context = context
instance.log = context.controller.log.getChild('strm')
return instance
return make_thing
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
self.log.info('Connection from %s:%i' % peername)
self.transport = transport
self.bytes = 0
self.last_report = 0
if not self._context.response.is_headers_sent():
self._context.response.send_headers()
def _cleanup_everything(self):
try:
result = self._context.controller.streaming_stopped(self._context)
except:
self.log.exception('While stopping streaming')
try:
self.transport.close()
except:
pass
self.log.debug('Total data proxied: %i KB' % (self.bytes / 1024))
def connection_lost(self, exc):
self._cleanup_everything()
def data_received(self, data):
try:
self._context.response.write(data)
self.bytes += len(data)
except socket.error:
self.log.debug('Receiver vanished')
self._cleanup_everything()
except Exception as e:
self.log.exception('Unexpected error: %s' % e)
self._cleanup_everything()
if (time.time() - self.last_report) > 10:
self.log.debug('Proxied %i KB for %s/%s' % (
self.bytes / 1024,
self._context.camera_mac,
self._context.stream))
self.last_report = time.time()
class NoSuchCamera(Exception):
pass
class CameraInUse(Exception):
pass
class UVCController(object):
def __init__(self, my_ip, baseport=7000):
self._cameras = {}
self.my_ip = my_ip
self.baseport = baseport
self.log = logging.getLogger('ctrl')
self.ws_server = unifi_ws_server.UVCWebsocketServer(
log=self.log.getChild('ws'))
@asyncio.coroutine
def init_server(self, loop):
port = 9999
srv = yield from loop.create_server(
lambda: RequestHandler(log=self.log.getChild('http'), debug=True),
'0.0.0.0', port)
self.log.info('HTTP stream server started on port %i' % port)
return srv
def start(self):
loop = self.loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGUSR1,
self.ws_server.reload_all_configs)
ws_server_server = loop.run_until_complete(
self.ws_server.make_server(7443))
http_server = loop.run_until_complete(self.init_server(loop))
loop.run_forever()
def get_free_port(self):
ports_in_use = [x.streamer_port for x in self._cameras.values()]
for i in range(self.baseport, self.baseport + 100):
if i not in ports_in_use:
return i
raise Exception('Too many ports')
def stream_camera(self, camera_mac, stream, response):
if not self.ws_server.is_camera_managed(camera_mac):
raise NoSuchCamera('No such camera')
if (camera_mac, stream) in self._cameras:
raise CameraInUse('Camera in use')
context = StreamerContext()
context.streaming = True
context.controller = self
context.camera_mac = camera_mac
context.stream = stream
context.response = response
context.streamer_port = self.get_free_port()
self.log.debug('Starting stream listener on port %i for camera %s' % (
context.streamer_port, camera_mac))
context.streamer = yield from self.loop.create_server(
Streamer.factory(context), '0.0.0.0', context.streamer_port)
self._cameras[(camera_mac, stream)] = context
yield from self.ws_server.start_video(camera_mac, self.my_ip,
context.streamer_port,
stream=context.stream)
return context
def streaming_stopped(self, context):
if not context.streaming:
# We've already done cleanup here
return
context.streaming = False
self.log.info('Stopping %s camera streaming' % context.camera_mac)
try:
context.streamer.close()
except:
self.log.exception('Failed to stop streaming server')
@asyncio.coroutine
def stop():
try:
yield from self.ws_server.stop_video(context.camera_mac,
stream=context.stream)
except unifi_ws_server.NoSuchCamera:
pass
asyncio.async(stop())
del self._cameras[(context.camera_mac, context.stream)]
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('You must specify the IP of this server')
sys.exit(1)
log_format = '%(asctime)s %(name)s/%(levelname)s: %(message)s'
date_format = '%Y-%m-%dT%H:%M:%S'
logging.getLogger(None).setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.ERROR)
logging.getLogger('websockets').setLevel(logging.WARNING)
lf = logging.Formatter(log_format, datefmt=date_format)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(lf)
logging.getLogger(None).addHandler(console)
debuglg = handlers.RotatingFileHandler('debug.log',
maxBytes=5*1024*1024,
backupCount=4)
debuglg.setLevel(logging.DEBUG)
debuglg.setFormatter(lf)
logging.getLogger(None).addHandler(debuglg)
controller = UVCController(sys.argv[1])
controller.start()
| gpl-3.0 |
RuudBurger/CouchPotatoServer | couchpotato/core/media/movie/providers/metadata/wdtv.py | 39 | 7626 | from xml.etree.ElementTree import Element, SubElement, tostring
import os
import re
import traceback
import xml.dom.minidom
from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
autoload = 'WdtvLive'
log = CPLog(__name__)
class WdtvLive(MovieMetaData):
def getThumbnailName(self, name, root, i):
return self.createMetaName('%s.jpg', name, root)
def createMetaName(self, basename, name, root):
return os.path.join(root, basename.replace('%s', name))
def getNfoName(self, name, root, i):
return self.createMetaName('%s.xml', name, root)
def getNfo(self, movie_info=None, data=None, i=0):
if not data: data = {}
if not movie_info: movie_info = {}
nfoxml = Element('details')
# Title
try:
el = SubElement(nfoxml, 'title')
el.text = toUnicode(getTitle(data))
except:
pass
# IMDB id
try:
el = SubElement(nfoxml, 'id')
el.text = toUnicode(data['identifier'])
except:
pass
# Runtime
try:
runtime = SubElement(nfoxml, 'runtime')
runtime.text = '%s min' % movie_info.get('runtime')
except:
pass
# Other values
types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released']
for type in types:
if ':' in type:
name, type = type.split(':')
else:
name = type
try:
if movie_info.get(type):
el = SubElement(nfoxml, name)
el.text = toUnicode(movie_info.get(type, ''))
except:
pass
# Rating
for rating_type in ['imdb', 'rotten', 'tmdb']:
try:
r, v = movie_info['rating'][rating_type]
rating = SubElement(nfoxml, 'rating')
rating.text = str(r)
votes = SubElement(nfoxml, 'votes')
votes.text = str(v)
break
except:
log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc()))
# Genre
for genre in movie_info.get('genres', []):
genres = SubElement(nfoxml, 'genre')
genres.text = toUnicode(genre)
# Actors
for actor_name in movie_info.get('actor_roles', {}):
role_name = movie_info['actor_roles'][actor_name]
actor = SubElement(nfoxml, 'actor')
name = SubElement(actor, 'name')
name.text = toUnicode(actor_name)
if role_name:
role = SubElement(actor, 'role')
role.text = toUnicode(role_name)
if movie_info['images']['actors'].get(actor_name):
thumb = SubElement(actor, 'thumb')
thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))
# Directors
for director_name in movie_info.get('directors', []):
director = SubElement(nfoxml, 'director')
director.text = toUnicode(director_name)
# Writers
for writer in movie_info.get('writers', []):
writers = SubElement(nfoxml, 'credits')
writers.text = toUnicode(writer)
# Sets or collections
collection_name = movie_info.get('collection')
if collection_name:
collection = SubElement(nfoxml, 'set')
collection.text = toUnicode(collection_name)
sorttitle = SubElement(nfoxml, 'sorttitle')
sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
# Images
for image_url in movie_info['images']['poster_original']:
image = SubElement(nfoxml, 'thumb')
image.text = toUnicode(image_url)
image_types = [
('fanart', 'backdrop_original'),
('banner', 'banner'),
('discart', 'disc_art'),
('logo', 'logo'),
('clearart', 'clear_art'),
('landscape', 'landscape'),
('extrathumb', 'extra_thumbs'),
('extrafanart', 'extra_fanart'),
]
for image_type in image_types:
sub, type = image_type
sub_element = SubElement(nfoxml, sub)
for image_url in movie_info['images'][type]:
image = SubElement(sub_element, 'thumb')
image.text = toUnicode(image_url)
# Add trailer if found
trailer_found = False
if data.get('renamed_files'):
for filename in data.get('renamed_files'):
if 'trailer' in filename:
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(filename)
trailer_found = True
if not trailer_found and data['files'].get('trailer'):
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(data['files']['trailer'][0])
# Add file metadata
fileinfo = SubElement(nfoxml, 'fileinfo')
streamdetails = SubElement(fileinfo, 'streamdetails')
# Video data
if data['meta_data'].get('video'):
video = SubElement(streamdetails, 'video')
codec = SubElement(video, 'codec')
codec.text = toUnicode(data['meta_data']['video'])
aspect = SubElement(video, 'aspect')
aspect.text = str(data['meta_data']['aspect'])
width = SubElement(video, 'width')
width.text = str(data['meta_data']['resolution_width'])
height = SubElement(video, 'height')
height.text = str(data['meta_data']['resolution_height'])
# Audio data
if data['meta_data'].get('audio'):
audio = SubElement(streamdetails, 'audio')
codec = SubElement(audio, 'codec')
codec.text = toUnicode(data['meta_data'].get('audio'))
channels = SubElement(audio, 'channels')
channels.text = toUnicode(data['meta_data'].get('audio_channels'))
# Clean up the xml and return it
nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
xml_string = nfoxml.toprettyxml(indent = ' ')
text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
xml_string = text_re.sub('>\g<1></', xml_string)
return xml_string.encode('utf-8')
config = [{
'name': 'wdtvlive',
'groups': [
{
'tab': 'renamer',
'subtab': 'metadata',
'name': 'wdtvlive_metadata',
'label': 'WDTV Live',
'description': 'Metadata for WDTV',
'options': [
{
'name': 'meta_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'meta_nfo',
'label': 'NFO',
'default': True,
'type': 'bool',
'description': 'Generate metadata xml',
},
{
'name': 'meta_thumbnail',
'label': 'Thumbnail',
'default': True,
'type': 'bool',
'description': 'Generate thumbnail jpg',
}
],
},
],
}]
| gpl-3.0 |
k1203/meeting | public/app/assets/plugins/vector-map/converter/simplifier.py | 234 | 5985 | import argparse
import sys
import os
from osgeo import ogr
from osgeo import osr
import anyjson
import shapely.geometry
import shapely.ops
import codecs
import time
format = '%.8f %.8f'
tolerance = 0.01
infile = '/Users/kirilllebedev/Maps/50m-admin-0-countries/ne_50m_admin_0_countries.shp'
outfile = 'map.shp'
# Open the datasource to operate on.
in_ds = ogr.Open( infile, update = 0 )
in_layer = in_ds.GetLayer( 0 )
in_defn = in_layer.GetLayerDefn()
# Create output file with similar information.
shp_driver = ogr.GetDriverByName( 'ESRI Shapefile' )
if os.path.exists('map.shp'):
shp_driver.DeleteDataSource( outfile )
shp_ds = shp_driver.CreateDataSource( outfile )
shp_layer = shp_ds.CreateLayer( in_defn.GetName(),
geom_type = in_defn.GetGeomType(),
srs = in_layer.GetSpatialRef() )
in_field_count = in_defn.GetFieldCount()
for fld_index in range(in_field_count):
src_fd = in_defn.GetFieldDefn( fld_index )
fd = ogr.FieldDefn( src_fd.GetName(), src_fd.GetType() )
fd.SetWidth( src_fd.GetWidth() )
fd.SetPrecision( src_fd.GetPrecision() )
shp_layer.CreateField( fd )
# Load geometries
geometries = []
for feature in in_layer:
geometry = feature.GetGeometryRef()
geometryType = geometry.GetGeometryType()
if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
#if not shapelyGeometry.is_valid:
#buffer to fix selfcrosses
#shapelyGeometry = shapelyGeometry.buffer(0)
if shapelyGeometry:
geometries.append(shapelyGeometry)
in_layer.ResetReading()
start = int(round(time.time() * 1000))
# Simplification
points = []
connections = {}
counter = 0
for geom in geometries:
counter += 1
polygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
if polygon.area > 0:
lines = []
lines.append(polygon.exterior)
for line in polygon.interiors:
lines.append(line)
for line in lines:
for i in range(len(line.coords)-1):
indexFrom = i
indexTo = i+1
pointFrom = format % line.coords[indexFrom]
pointTo = format % line.coords[indexTo]
if pointFrom == pointTo:
continue
if not (pointFrom in connections):
connections[pointFrom] = {}
connections[pointFrom][pointTo] = 1
if not (pointTo in connections):
connections[pointTo] = {}
connections[pointTo][pointFrom] = 1
print int(round(time.time() * 1000)) - start
simplifiedLines = {}
pivotPoints = {}
def simplifyRing(ring):
coords = list(ring.coords)[0:-1]
simpleCoords = []
isPivot = False
pointIndex = 0
while not isPivot and pointIndex < len(coords):
pointStr = format % coords[pointIndex]
pointIndex += 1
isPivot = ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints))
pointIndex = pointIndex - 1
if not isPivot:
simpleRing = shapely.geometry.LineString(coords).simplify(tolerance)
if len(simpleRing.coords) <= 2:
return None
else:
pivotPoints[format % coords[0]] = True
pivotPoints[format % coords[-1]] = True
simpleLineKey = format % coords[0]+':'+format % coords[1]+':'+format % coords[-1]
simplifiedLines[simpleLineKey] = simpleRing.coords
return simpleRing
else:
points = coords[pointIndex:len(coords)]
points.extend(coords[0:pointIndex+1])
iFrom = 0
for i in range(1, len(points)):
pointStr = format % points[i]
if ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints)):
line = points[iFrom:i+1]
lineKey = format % line[-1]+':'+format % line[-2]+':'+format % line[0]
if lineKey in simplifiedLines:
simpleLine = simplifiedLines[lineKey]
simpleLine = list(reversed(simpleLine))
else:
simpleLine = shapely.geometry.LineString(line).simplify(tolerance).coords
lineKey = format % line[0]+':'+format % line[1]+':'+format % line[-1]
simplifiedLines[lineKey] = simpleLine
simpleCoords.extend( simpleLine[0:-1] )
iFrom = i
if len(simpleCoords) <= 2:
return None
else:
return shapely.geometry.LineString(simpleCoords)
def simplifyPolygon(polygon):
simpleExtRing = simplifyRing(polygon.exterior)
if simpleExtRing is None:
return None
simpleIntRings = []
for ring in polygon.interiors:
simpleIntRing = simplifyRing(ring)
if simpleIntRing is not None:
simpleIntRings.append(simpleIntRing)
return shapely.geometry.Polygon(simpleExtRing, simpleIntRings)
results = []
for geom in geometries:
polygons = []
simplePolygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
simplePolygon = simplifyPolygon(polygon)
if not (simplePolygon is None or simplePolygon._geom is None):
simplePolygons.append(simplePolygon)
if len(simplePolygons) > 0:
results.append(shapely.geometry.MultiPolygon(simplePolygons))
else:
results.append(None)
# Process all features in input layer.
in_feat = in_layer.GetNextFeature()
counter = 0
while in_feat is not None:
if results[counter] is not None:
out_feat = ogr.Feature( feature_def = shp_layer.GetLayerDefn() )
out_feat.SetFrom( in_feat )
out_feat.SetGeometryDirectly(
ogr.CreateGeometryFromWkb(
shapely.wkb.dumps(
results[counter]
)
)
)
shp_layer.CreateFeature( out_feat )
out_feat.Destroy()
else:
print 'geometry is too small: '+in_feat.GetField(16)
in_feat.Destroy()
in_feat = in_layer.GetNextFeature()
counter += 1
# Cleanup
shp_ds.Destroy()
in_ds.Destroy()
print int(round(time.time() * 1000)) - start | mit |
aequitas/home-assistant | tests/components/zha/test_switch.py | 7 | 2904 | """Test zha switch."""
from unittest.mock import call, patch
from homeassistant.components.switch import DOMAIN
from homeassistant.const import STATE_ON, STATE_OFF, STATE_UNAVAILABLE
from tests.common import mock_coro
from .common import (
async_init_zigpy_device, make_attribute, make_entity_id,
async_test_device_join, async_enable_traffic
)
ON = 1
OFF = 0
async def test_switch(hass, config_entry, zha_gateway):
"""Test zha switch platform."""
from zigpy.zcl.clusters.general import OnOff, Basic
from zigpy.zcl.foundation import Status
# create zigpy device
zigpy_device = await async_init_zigpy_device(
hass, [OnOff.cluster_id, Basic.cluster_id], [], None, zha_gateway)
# load up switch domain
await hass.config_entries.async_forward_entry_setup(
config_entry, DOMAIN)
await hass.async_block_till_done()
cluster = zigpy_device.endpoints.get(1).on_off
entity_id = make_entity_id(DOMAIN, zigpy_device, cluster)
zha_device = zha_gateway.get_device(zigpy_device.ieee)
# test that the switch was created and that its state is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_gateway, [zha_device])
# test that the state has changed from unavailable to off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on at switch
attr = make_attribute(0, 1)
cluster.handle_message(False, 1, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
# turn off at switch
attr.value.value = 0
cluster.handle_message(False, 0, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
'zigpy.zcl.Cluster.request',
return_value=mock_coro([Status.SUCCESS, Status.SUCCESS])):
# turn on via UI
await hass.services.async_call(DOMAIN, 'turn_on', {
'entity_id': entity_id
}, blocking=True)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None)
# turn off from HA
with patch(
'zigpy.zcl.Cluster.request',
return_value=mock_coro([Status.SUCCESS, Status.SUCCESS])):
# turn off via UI
await hass.services.async_call(DOMAIN, 'turn_off', {
'entity_id': entity_id
}, blocking=True)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None)
# test joining a new switch to the network and HA
await async_test_device_join(
hass, zha_gateway, OnOff.cluster_id, DOMAIN)
| apache-2.0 |
secretsquirrel/the-backdoor-factory | osslsigncode/misc/pagehash.py | 7 | 3180 | #!/usr/bin/python
import struct
import sys
import hashlib
from pyasn1.type import univ
from pyasn1.codec.ber import encoder, decoder
f = open(sys.argv[1], 'rb')
filehdr = f.read(1024)
if filehdr[0:2] != 'MZ':
print "Not a DOS file."
sys.exit(0)
pepos = struct.unpack('<I', filehdr[60:64])[0]
if filehdr[pepos:pepos+4] != 'PE\0\0':
print "Not a PE file."
sys.exit(0)
pepos += 4
nsections = struct.unpack('<H', filehdr[pepos+2:pepos+4])[0]
print "#sections", nsections
magic = struct.unpack('<H', filehdr[pepos+20:pepos+22])[0]
pe32plus = 0
if magic == 0x20b:
pe32plus = 1
elif magic == 0x10b:
pe32plus = 0
else:
print "Unknown magic", magic
sys.exit(0)
sectoralign = struct.unpack('<I', filehdr[pepos+52:pepos+56])[0]
print "Sector alignment", sectoralign
pos = pepos + 112 + pe32plus*16
nrvas = struct.unpack('<I', filehdr[pos:pos+4])[0]
print "#rvas", nrvas
pos += 4
tpos = pos
rvas = []
for i in range(0, nrvas):
(p1,p2) = struct.unpack('<II', filehdr[pos:pos+8])
rvas.append((p1,p2))
pos += 8
sections = []
for i in range(0, nsections):
(vsize,vaddr,rsize,raddr) = struct.unpack('<IIII', filehdr[pos+8:pos+24])
pos += 40
sections.append((vsize,vaddr,rsize,raddr))
hdrend = pos
print "End of headers", pos
print rvas
print sections
sigpos,siglen = rvas[4]
if sigpos == 0:
print "No signature found"
sys.exit(0)
f.seek(sigpos)
sigblob = f.read(siglen)
cid_page_hash = "\xa6\xb5\x86\xd5\xb4\xa1\x24\x66\xae\x05\xa2\x17\xda\x8e\x60\xd6"
oid_ph_v1 = "\x06\x01\x04\x01\x82\x37\x02\x03\x01"
oid_ph_v2 = "\x06\x01\x04\x01\x82\x37\x02\x03\x02"
p = sigblob.find(cid_page_hash)
if p == -1:
print "No page hash present"
sys.exit(0)
p += len(cid_page_hash)
sha1 = True
i = sigblob.find(oid_ph_v1)
if i == -1:
i = sigblob.find(oid_ph_v2)
if i == -1:
print "No page hash found"
sys.exit(0)
sha1 = False
p = i + len(oid_ph_v1)
blob = str(decoder.decode(sigblob[p:])[0].getComponentByPosition(0))
ph = []
i = 0
hashlen = 20
if not sha1:
hashlen = 24
while i < len(blob):
offset = struct.unpack('<I', blob[i:i+4])[0]
i += 4
data = blob[i:i+hashlen]
ph.append((offset,data.encode("hex")))
i += hashlen
if sha1:
md = hashlib.sha1()
else:
md = hashlib.sha256()
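# Hash of page 0: the first 1024 header bytes, minus the optional header
# CheckSum field and the Certificate Table data directory entry, followed by
# zero padding.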
b = filehdr[0:pepos+84]
b += filehdr[pepos+88:tpos+4*8]
b += filehdr[tpos+5*8:1024]
b += '\0'*(4096-1024)
md.update(b)
digest = md.hexdigest()
print ""
print "Checking page hash..."
print ""
nph = [(0,digest)]
lastpos = 0
pagesize = sectoralign # ???
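# Recompute the page hashes: one digest per page of each section's raw data
# (the final partial page is zero-padded), terminated by an entry at the end
# of the last section whose hash is all zeroes.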
for vs,vo,rs,ro in sections:
l = 0
while l < rs:
f.seek(ro+l)
howmuch = pagesize
if rs - l < pagesize:
howmuch = rs - l
b = f.read(howmuch)
if howmuch < pagesize:
b = b + '\0' * (pagesize - (rs - l))
if sha1:
d = hashlib.sha1(b).hexdigest()
else:
d = hashlib.sha256(b).hexdigest()
nph.append((ro+l, d))
l += pagesize
lastpos = ro + rs
nph.append((lastpos,'0'*(2*hashlen)))
for i in range(0,len(nph)):
x=ph[i]
y=nph[i]
if x[0] != y[0] or x[1] != y[1]:
print "Not matching:", x, "!=", y
| bsd-3-clause |
kosz85/django | django/db/models/sql/query.py | 1 | 92537 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from collections import Counter, Iterator, Mapping, OrderedDict, namedtuple
from contextlib import suppress
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, EmptyResultSet, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
class Query:
"""A single SQL query."""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = OrderedDict()
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def _prepare(self, field):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj._annotations = self._annotations.copy() if self._annotations is not None else None
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
# Clear the cached_property
with suppress(AttributeError):
del obj.base_table
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
elif isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (isinstance(self.group_by, tuple) or has_limit or has_existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = ()
self.default_cols = False
self._extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None for q in outer_query.annotation_select.items()]
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return {
alias: val
for (alias, annotation), val
in zip(outer_query.annotation_select.items(), result)
}
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
def has_filters(self):
return self.where
def has_results(self, using):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.fields:
if field in values:
continue
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
return alias, True
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Recursively promote the join type of the given aliases and their
        children to an outer join. A join is only promoted if it is nullable
        or its parent join is an outer join.
        The children are promoted as well to avoid join chains that contain
        a LOUTER b INNER c. So, if we currently have a INNER b INNER c and
        a->b is promoted, then b->c must also be promoted automatically;
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
if self._annotations:
self._annotations = OrderedDict(
(key, col.relabeled_clone(change_map)) for key, col in self._annotations.items())
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
def bump_prefix(self, outer_query):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.alias_map):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a
sql.datastructures.BaseTable or Join.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
reuse = [a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join]
if reuse:
self.ref_alias(reuse[0])
return reuse[0]
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain with no parents;
            # assign the new options object and skip to the next base in that
            # case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
# It's safe to drop ordering if the queryset isn't using slicing,
# distinct(*fields) or select_for_update().
if (self.low_mark == 0 and self.high_mark is None and
not self.distinct_fields and
not self.select_for_update):
clone.clear_ordering(True)
return clone
def as_sql(self, compiler, connection):
return self.get_compiler(connection=connection).as_sql()
def resolve_lookup_value(self, value, can_reuse, allow_joins):
used_joins = set()
if hasattr(value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
processed_values = []
for sub_value in value:
if hasattr(sub_value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
processed_values.append(
sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
)
# The used_joins for a tuple of expressions is the union of
# the used_joins for the individual expressions.
used_joins.update(k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0))
return value, used_joins
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model "%s".' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
if len(lookups) == 0:
lookups = ['exact']
for name in lookups[:-1]:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookups[-1])
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[-1]))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookups[-1])
lookup_class = lhs.get_lookup('exact')
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if lookup.rhs is None:
if lookup.lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup.lookup_name == 'exact' and lookup.rhs == ''):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(name, lhs.output_field.__class__.__name__))
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
value, used_joins = self.resolve_lookup_value(value, can_reuse, allow_joins)
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = targets[0].get_col(alias, join_info.final_field)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = targets[0].get_col(alias, join_info.final_field)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(targets[0].get_col(alias, join_info.targets[0]), False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(field_names + list(self.annotation_select))
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(opts.db_table, alias, None, INNER, join.join_field, nullable)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
return JoinInfo(final_field, targets, opts, joins, path)
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contain the PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotation_select[name]
else:
field_list = name.split(LOOKUP_SEP)
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), reuse)
targets, _, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
if reuse is not None:
reuse.update(join_list)
col = targets[0].get_col(join_list[-1], join_info.targets[0])
return col
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
        # Try to keep the subquery as simple as possible -> trim leading joins
        # from the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(target.get_col(final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(list(get_field_names_from_opts(opts)) + list(self.extra) + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Remove any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = ()
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
for col in annotation.get_group_by_cols():
group_by.append(col)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
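        # For example, add_select_related(['author__hometown']) leaves
        # self.select_related == {'author': {'hometown': {}}}.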
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
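        # Illustrative example (assumes a model with a pub_date column):
        #   q.add_extra({'is_recent': 'pub_date > %s'}, ['2020-01-01'],
        #               None, None, None, None)
        # pairs the placeholder with its parameter, so that
        #   self.extra['is_recent'] == ('pub_date > %s', ['2020-01-01'])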
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
        is done. Add the new field names to any existing field names that
        are deferred (or remove them from any existing field names that are
        marked as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
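        # Illustrative walk-through (assumes the model's pk attribute is 'id'):
        #   q.clear_deferred_loading()               # (frozenset(), True)
        #   q.add_deferred_loading(['name'])         # ({'name'}, True)
        #   q.add_immediate_loading(['name', 'pk'])  # ({'id'}, False); the
        #   # previously deferred 'name' is dropped from the immediate set.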
def get_loaded_field_names(self):
"""
        If any fields are marked to be deferred, return a dictionary mapping
        models to a set of field names that will be loaded for them. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(set(names).union(self.annotation_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
self.set_group_by()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self._extra and not self._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the OrderedDict of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self._annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = OrderedDict(
(k, v) for k, v in self.annotations.items()
if k in self.annotation_select_mask
)
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:
return True
else:
return field.null
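        # For example, on a backend such as Oracle (where
        # interprets_empty_strings_as_nulls is True), a CharField(null=False)
        # is still treated as nullable here because '' is stored as NULL.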
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
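# For example, with the default direction mapping:
#   get_order_dir('-foo')         -> ('foo', 'DESC')
#   get_order_dir('foo', 'DESC')  -> ('foo', 'DESC')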
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
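# Example:
#   d = {}
#   add_to_dict(d, 'k', 1)  # d == {'k': {1}}
#   add_to_dict(d, 'k', 2)  # d == {'k': {1, 2}}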
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
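# Illustrative example with hypothetical models: if Restaurant declares
#   place = models.OneToOneField(Place, ...)
# then the reverse relation Place._meta.get_field('restaurant') is a relation,
# one-to-one and not concrete, so is_reverse_o2o() returns True for it, while
# the forward 'place' field is concrete and returns False.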
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
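        # For example, a negated AND behaves like an OR for join promotion
        # (De Morgan's law): JoinPromoter(AND, 2, negated=True) ends up with
        # effective_connector == OR, while JoinPromoter(OR, 2, False) keeps OR.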
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example a
            # reverse foreign key with no matching rows, or a NULL value in a
            # direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
| bsd-3-clause |
ryancanhelpyou/servo | tests/wpt/update/upstream.py | 43 | 13551 | import os
import re
import subprocess
import sys
import urlparse
from wptrunner.update.sync import LoadManifest
from wptrunner.update.tree import get_unique_name
from wptrunner.update.base import Step, StepRunner, exit_clean, exit_unclean
from .tree import Commit, GitTree, Patch
import github
from .github import GitHub
def rewrite_patch(patch, strip_dir):
"""Take a Patch and convert to a different repository by stripping a prefix from the
file paths. Also rewrite the message to remove the bug number and reviewer, but add
a bugzilla link in the summary.
:param patch: the Patch to convert
:param strip_dir: the path prefix to remove
"""
if not strip_dir.startswith("/"):
strip_dir = "/%s"% strip_dir
new_diff = []
line_starts = ["diff ", "+++ ", "--- "]
for line in patch.diff.split("\n"):
for start in line_starts:
if line.startswith(start):
new_diff.append(line.replace(strip_dir, "").encode("utf8"))
break
else:
new_diff.append(line)
new_diff = "\n".join(new_diff)
assert new_diff != patch
return Patch(patch.author, patch.email, rewrite_message(patch), new_diff)
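# For example, with strip_dir="testing/web-platform/tests" (path illustrative),
# a diff header such as
#   --- a/testing/web-platform/tests/dom/historical.html
# is rewritten to
#   --- a/dom/historical.html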
def rewrite_message(patch):
rest = patch.message.body
if patch.message.bug is not None:
return "\n".join([patch.message.summary,
patch.message.body,
"",
"Upstreamed from https://bugzilla.mozilla.org/show_bug.cgi?id=%s" %
patch.message.bug])
return "\n".join([patch.message.full_summary, rest])
class SyncToUpstream(Step):
"""Sync local changes to upstream"""
def create(self, state):
if not state.kwargs["upstream"]:
return
if not isinstance(state.local_tree, GitTree):
self.logger.error("Cannot sync with upstream from a non-Git checkout.")
return exit_clean
try:
import requests
except ImportError:
self.logger.error("Upstream sync requires the requests module to be installed")
return exit_clean
if not state.sync_tree:
os.makedirs(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "tests_path", "metadata_path",
"sync"]):
state.token = kwargs["token"]
runner = SyncToUpstreamRunner(self.logger, state)
runner.run()
class CheckoutBranch(Step):
"""Create a branch in the sync tree pointing at the last upstream sync commit
and check it out"""
provides = ["branch"]
def create(self, state):
self.logger.info("Updating sync tree from %s" % state.sync["remote_url"])
state.branch = state.sync_tree.unique_branch_name(
"outbound_update_%s" % state.test_manifest.rev)
state.sync_tree.update(state.sync["remote_url"],
state.sync["branch"],
state.branch)
state.sync_tree.checkout(state.test_manifest.rev, state.branch, force=True)
class GetLastSyncCommit(Step):
"""Find the gecko commit at which we last performed a sync with upstream."""
provides = ["last_sync_path", "last_sync_commit"]
def create(self, state):
self.logger.info("Looking for last sync commit")
state.last_sync_path = os.path.join(state.metadata_path, "mozilla-sync")
with open(state.last_sync_path) as f:
last_sync_sha1 = f.read().strip()
state.last_sync_commit = Commit(state.local_tree, last_sync_sha1)
if not state.local_tree.contains_commit(state.last_sync_commit):
self.logger.error("Could not find last sync commit %s" % last_sync_sha1)
return exit_clean
self.logger.info("Last sync to web-platform-tests happened in %s" % state.last_sync_commit.sha1)
class GetBaseCommit(Step):
"""Find the latest upstream commit on the branch that we are syncing with"""
provides = ["base_commit"]
def create(self, state):
state.base_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
state.sync["branch"])
self.logger.debug("New base commit is %s" % state.base_commit.sha1)
class LoadCommits(Step):
"""Get a list of commits in the gecko tree that need to be upstreamed"""
provides = ["source_commits"]
def create(self, state):
state.source_commits = state.local_tree.log(state.last_sync_commit,
state.tests_path)
        update_regexp = re.compile(r"Bug \d+ - Update web-platform-tests to revision [0-9a-f]{40}")
for i, commit in enumerate(state.source_commits[:]):
if update_regexp.match(commit.message.text):
# This is a previous update commit so ignore it
state.source_commits.remove(commit)
continue
if commit.message.backouts:
#TODO: Add support for collapsing backouts
raise NotImplementedError("Need to get the Git->Hg commits for backouts and remove the backed out patch")
if not commit.message.bug:
self.logger.error("Commit %i (%s) doesn't have an associated bug number." %
(i + 1, commit.sha1))
return exit_unclean
self.logger.debug("Source commits: %s" % state.source_commits)
class SelectCommits(Step):
"""Provide a UI to select which commits to upstream"""
def create(self, state):
if not state.source_commits:
return
while True:
commits = state.source_commits[:]
for i, commit in enumerate(commits):
print "%i:\t%s" % (i, commit.message.summary)
remove = raw_input("Provide a space-separated list of any commits numbers to remove from the list to upstream:\n").strip()
remove_idx = set()
invalid = False
for item in remove.split(" "):
try:
item = int(item)
                except ValueError:
invalid = True
break
if item < 0 or item >= len(commits):
invalid = True
break
remove_idx.add(item)
if invalid:
continue
keep_commits = [(i,cmt) for i,cmt in enumerate(commits) if i not in remove_idx]
            #TODO: consider printing removed commits
print "Selected the following commits to keep:"
for i, commit in keep_commits:
print "%i:\t%s" % (i, commit.message.summary)
confirm = raw_input("Keep the above commits? y/n\n").strip().lower()
if confirm == "y":
state.source_commits = [item[1] for item in keep_commits]
break
class MovePatches(Step):
"""Convert gecko commits into patches against upstream and commit these to the sync tree."""
provides = ["commits_loaded"]
def create(self, state):
state.commits_loaded = 0
strip_path = os.path.relpath(state.tests_path,
state.local_tree.root)
self.logger.debug("Stripping patch %s" % strip_path)
for commit in state.source_commits[state.commits_loaded:]:
i = state.commits_loaded + 1
self.logger.info("Moving commit %i: %s" % (i, commit.message.full_summary))
patch = commit.export_patch(state.tests_path)
stripped_patch = rewrite_patch(patch, strip_path)
try:
state.sync_tree.import_patch(stripped_patch)
except:
print patch.diff
raise
state.commits_loaded = i
class RebaseCommits(Step):
"""Rebase commits from the current branch on top of the upstream destination branch.
This step is particularly likely to fail if the rebase generates merge conflicts.
In that case the conflicts can be fixed up locally and the sync process restarted
with --continue.
"""
provides = ["rebased_commits"]
def create(self, state):
self.logger.info("Rebasing local commits")
continue_rebase = False
# Check if there's a rebase in progress
if (os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-merge")) or
os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-apply"))):
continue_rebase = True
try:
state.sync_tree.rebase(state.base_commit, continue_rebase=continue_rebase)
except subprocess.CalledProcessError:
self.logger.info("Rebase failed, fix merge and run %s again with --continue" % sys.argv[0])
raise
state.rebased_commits = state.sync_tree.log(state.base_commit)
self.logger.info("Rebase successful")
class CheckRebase(Step):
"""Check if there are any commits remaining after rebase"""
def create(self, state):
if not state.rebased_commits:
self.logger.info("Nothing to upstream, exiting")
return exit_clean
class MergeUpstream(Step):
"""Run steps to push local commits as seperate PRs and merge upstream."""
provides = ["merge_index", "gh_repo"]
def create(self, state):
gh = GitHub(state.token)
if "merge_index" not in state:
state.merge_index = 0
org, name = urlparse.urlsplit(state.sync["remote_url"]).path[1:].split("/")
if name.endswith(".git"):
name = name[:-4]
state.gh_repo = gh.repo(org, name)
for commit in state.rebased_commits[state.merge_index:]:
with state.push(["gh_repo", "sync_tree"]):
state.commit = commit
pr_merger = PRMergeRunner(self.logger, state)
rv = pr_merger.run()
if rv is not None:
return rv
state.merge_index += 1
class UpdateLastSyncCommit(Step):
"""Update the gecko commit at which we last performed a sync with upstream."""
provides = []
def create(self, state):
self.logger.info("Updating last sync commit")
with open(state.last_sync_path, "w") as f:
f.write(state.local_tree.rev)
# This gets added to the patch later on
class MergeLocalBranch(Step):
"""Create a local branch pointing at the commit to upstream"""
provides = ["local_branch"]
def create(self, state):
branch_prefix = "sync_%s" % state.commit.sha1
local_branch = state.sync_tree.unique_branch_name(branch_prefix)
state.sync_tree.create_branch(local_branch, state.commit)
state.local_branch = local_branch
class MergeRemoteBranch(Step):
"""Get an unused remote branch name to use for the PR"""
provides = ["remote_branch"]
def create(self, state):
remote_branch = "sync_%s" % state.commit.sha1
branches = [ref[len("refs/heads/"):] for sha1, ref in
state.sync_tree.list_remote(state.gh_repo.url)
if ref.startswith("refs/heads")]
state.remote_branch = get_unique_name(branches, remote_branch)
class PushUpstream(Step):
"""Push local branch to remote"""
def create(self, state):
self.logger.info("Pushing commit upstream")
state.sync_tree.push(state.gh_repo.url,
state.local_branch,
state.remote_branch)
class CreatePR(Step):
"""Create a PR for the remote branch"""
provides = ["pr"]
def create(self, state):
self.logger.info("Creating a PR")
commit = state.commit
state.pr = state.gh_repo.create_pr(commit.message.full_summary,
state.remote_branch,
"master",
commit.message.body if commit.message.body else "")
class PRAddComment(Step):
"""Add an issue comment indicating that the code has been reviewed already"""
def create(self, state):
state.pr.issue.add_comment("Code reviewed upstream.")
class MergePR(Step):
"""Merge the PR"""
def create(self, state):
self.logger.info("Merging PR")
state.pr.merge()
class PRDeleteBranch(Step):
"""Delete the remote branch"""
def create(self, state):
self.logger.info("Deleting remote branch")
state.sync_tree.push(state.gh_repo.url, "", state.remote_branch)
class SyncToUpstreamRunner(StepRunner):
"""Runner for syncing local changes to upstream"""
steps = [LoadManifest,
CheckoutBranch,
GetLastSyncCommit,
GetBaseCommit,
LoadCommits,
SelectCommits,
MovePatches,
RebaseCommits,
CheckRebase,
MergeUpstream,
UpdateLastSyncCommit]
class PRMergeRunner(StepRunner):
"""(Sub)Runner for creating and merging a PR"""
steps = [
MergeLocalBranch,
MergeRemoteBranch,
PushUpstream,
CreatePR,
PRAddComment,
MergePR,
PRDeleteBranch,
]
| mpl-2.0 |
bigplus/thefuck | thefuck/rules/dirty_unzip.py | 5 | 1044 | import os
import zipfile
from thefuck.utils import for_app
def _is_bad_zip(file):
with zipfile.ZipFile(file, 'r') as archive:
return len(archive.namelist()) > 1
def _zip_file(command):
    # unzip works that way:
    #   unzip [-flags] file[.zip] [file(s) ...] [-x file(s) ...]
    # where file[.zip] is the archive to unzip and the trailing file(s) are
    # the members to extract from it.
for c in command.script.split()[1:]:
if not c.startswith('-'):
if c.endswith('.zip'):
return c
else:
return '{}.zip'.format(c)
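# Illustrative examples:
#   'unzip foo'            -> 'foo.zip'
#   'unzip foo.zip'        -> 'foo.zip'
#   'unzip -o foo.zip bar' -> 'foo.zip'  (flags are skipped)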
@for_app('unzip')
def match(command, settings):
return ('-d' not in command.script
and _is_bad_zip(_zip_file(command)))
def get_new_command(command, settings):
return '{} -d {}'.format(command.script, _zip_file(command)[:-4])
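# For example, a failed `unzip foo.zip` is corrected to `unzip foo.zip -d foo`,
# extracting the archive into its own directory instead of the current one.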
def side_effect(old_cmd, command, settings):
with zipfile.ZipFile(_zip_file(old_cmd), 'r') as archive:
for file in archive.namelist():
os.remove(file)
requires_output = False
| mit |
daevaorn/sentry | src/sentry/migrations/0026_auto__add_field_project_status.py | 36 | 11026 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.status'
db.add_column('sentry_project', 'status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.status'
db.delete_column('sentry_project', 'status')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'project_set'", 'to': "orm['sentry.User']"})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
tbeadle/django | tests/prefetch_related/test_prefetch_related_objects.py | 35 | 4734 | from django.db.models import Prefetch, prefetch_related_objects
from django.test import TestCase
from .models import Author, Book, Reader
class PrefetchRelatedObjectsTests(TestCase):
"""
Since prefetch_related_objects() is just the inner part of
prefetch_related(), only do basic tests to ensure its API hasn't changed.
"""
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def test_unknown(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertRaises(AttributeError):
prefetch_related_objects([book1], 'unknown_attribute')
def test_m2m_forward(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], 'authors')
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_m2m_reverse(self):
author1 = Author.objects.get(id=self.author1.id)
with self.assertNumQueries(1):
prefetch_related_objects([author1], 'books')
with self.assertNumQueries(0):
self.assertEqual(set(author1.books.all()), {self.book1, self.book2})
def test_foreignkey_forward(self):
authors = list(Author.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(authors, 'first_book')
with self.assertNumQueries(0):
[author.first_book for author in authors]
def test_foreignkey_reverse(self):
books = list(Book.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(books, 'first_time_authors')
with self.assertNumQueries(0):
[list(book.first_time_authors.all()) for book in books]
def test_m2m_then_m2m(self):
"""
We can follow a m2m and another m2m.
"""
authors = list(Author.objects.all())
with self.assertNumQueries(2):
prefetch_related_objects(authors, 'books__read_by')
with self.assertNumQueries(0):
self.assertEqual(
[
[[str(r) for r in b.read_by.all()] for b in a.books.all()]
for a in authors
],
[
[['Amy'], ['Belinda']], # Charlotte - Poems, Jane Eyre
[['Amy']], # Anne - Poems
[['Amy'], []], # Emily - Poems, Wuthering Heights
                    [['Amy', 'Belinda']], # Jane - Sense and Sensibility
]
)
def test_prefetch_object(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_prefetch_object_to_attr(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors', to_attr='the_authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.the_authors), {self.author1, self.author2, self.author3})
def test_prefetch_queryset(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects(
[book1],
Prefetch('authors', queryset=Author.objects.filter(id__in=[self.author1.id, self.author2.id]))
)
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2})
| bsd-3-clause |
huang4fstudio/django | tests/forms_tests/tests/test_fields.py | 5 | 86091 | # -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase, ignore_warnings
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
        number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
def test_charfield_strip(self):
"""
Ensure that values have whitespace stripped and that strip=False works.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1.5')
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Test that class-defined widget is not overwritten by __init__ (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertIsInstance(f.clean('1'), float)
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Infinity')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" />')
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f.has_changed(n, '4.3500'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f.has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertIsInstance(f.clean('1'), Decimal)
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '123.45')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '1.234')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'", f.clean, '123.4')
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-123.45')
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '-000.123')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-000.12345')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'", f.clean, '000000.0002')
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'", f.clean, '1.1')
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('1E+2'), Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
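# Note (editor's sketch, not part of the original test): judging from the
# assertions below, the rendered "step" attribute appears to be
# 10 ** -decimal_places expressed as a string ('0.01', '1', '1e-19'), with
# 'any' used as a fallback when decimal_places is left unspecified.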
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
Make sure a localized DecimalField's widget renders to a text input with
no number-input-specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
@ignore_warnings(category=RemovedInDjango110Warning) # for _has_changed
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
# Test for deprecated behavior _has_changed
self.assertFalse(f._has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f.has_changed(t1, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
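# Note: the next two assertions rely on strptime's %f directive, which accepts
# one to six fractional digits and zero-pads on the right, so '.0002' is read
# as 000200 microseconds (i.e. 200), the same as '.000200'.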
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
# DurationField ###########################################################
def test_durationfield_1(self):
f = DurationField()
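# The assertions below exercise the accepted duration syntax, which is roughly
# "[DD ] [[HH:]MM:]SS[.uuuuuu]": bare seconds, MM:SS, HH:MM:SS, and a leading
# day count with an optional fractional-seconds part.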
self.assertEqual(datetime.timedelta(seconds=30), f.clean('30'))
self.assertEqual(
datetime.timedelta(minutes=15, seconds=30),
f.clean('15:30')
)
self.assertEqual(
datetime.timedelta(hours=1, minutes=15, seconds=30),
f.clean('1:15:30')
)
self.assertEqual(
datetime.timedelta(
days=1, hours=1, minutes=15, seconds=30, milliseconds=300),
f.clean('1 1:15:30.3')
)
def test_durationfield_2(self):
class DurationForm(Form):
duration = DurationField(initial=datetime.timedelta(hours=1))
f = DurationForm()
self.assertHTMLEqual(
'<input id="id_duration" type="text" name="duration" value="01:00:00">',
str(f['duration'])
)
def test_durationfield_prepare_value(self):
field = DurationField()
td = datetime.timedelta(minutes=15, seconds=30)
self.assertEqual(field.prepare_value(td), duration_string(td))
self.assertEqual(field.prepare_value('arbitrary'), 'arbitrary')
self.assertIsNone(field.prepare_value(None))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^[0-9][A-F][0-9]$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^[0-9][A-F][0-9]$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^[0-9][A-F][0-9]$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
@ignore_warnings(category=RemovedInDjango110Warning) # error_message deprecation
def test_regexfield_4(self):
f = RegexField('^[0-9][0-9][0-9][0-9]$', error_message='Enter a four-digit number.')
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, '123')
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, 'abcd')
def test_regexfield_5(self):
f = RegexField('^[0-9]+$', min_length=5, max_length=10)
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 5 characters (it has 3).'", f.clean, '123')
six.assertRaisesRegex(self, ValidationError, "'Ensure this value has at least 5 characters \(it has 3\)\.', u?'Enter a valid value\.'", f.clean, 'abc')
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '12345678901')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
Ensure that it works with unicode characters.
Refs #.
"""
f = RegexField(r'^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^[0-9]+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('[email protected]\xe4\xf6\xfc\xdfabc.part.com',
f.clean('[email protected]äöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
# Check for the runaway regex security problem. This will take an extremely
# long time if the security fix isn't in place.
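# Catastrophic backtracking typically comes from nested or overlapping
# quantifiers in the pattern; on a crafted non-matching input like the long
# address below, matching time then grows roughly exponentially with length.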
addr = '[email protected]'
self.assertEqual(addr, f.clean(addr))
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertEqual('[email protected]', f.clean(' [email protected] \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'", f.clean, '[email protected]')
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'", f.clean, '[email protected]')
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, 'some content that is not a file')
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', None))
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8')))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf')))
def test_filefield_2(self):
f = FileField(max_length=5)
self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'", f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
"""
Test the behavior of has_changed for FileField. The value of data will
more than likely come from request.FILES. The value of initial data will
likely be a filename stored in the database. Since its value is of no use
to a FileField, it is ignored.
"""
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
# ImageField ##################################################################
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
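# Presumably ImageField.to_python() opens the upload with Pillow to validate
# it, then annotates the returned file with the detected image
# (uploaded_file.image) and its real content type, overriding the bogus
# 'text/plain' set below.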
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.png'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_bitmap_image_after_clean(self):
"""
This also tests the situation when Pillow doesn't detect the MIME type
of the image (#24948).
"""
from PIL.BmpImagePlugin import BmpImageFile
try:
Image.register_mime(BmpImageFile.format, None)
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.bmp'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.bmp', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('BMP', uploaded_file.image.format)
self.assertIsNone(uploaded_file.content_type)
finally:
Image.register_mime(BmpImageFile.format, 'image/bmp')
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost', f.clean('http://localhost'))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com.', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
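# Scheme-less input appears to be normalized by prefixing 'http://', as the
# next few assertions show.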
self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
self.assertEqual('http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah'))
self.assertEqual('http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
# hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'", f.clean, 'http://f.com')
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'", f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com')
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
self.assertEqual('http://example.com?some_param=some_value',
f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://[12:34::3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed('', None))
self.assertFalse(f.has_changed('', ''))
self.assertTrue(f.has_changed(False, 'on'))
self.assertFalse(f.has_changed(True, 'on'))
self.assertTrue(f.has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
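# has_changed presumably runs both values through to_python(), so the string
# 'False' becomes False while 'on' becomes True, and the field reports a change.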
self.assertTrue(f.has_changed('False', 'on'))
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. John is not one of the available choices.'", f.clean, 'John')
def test_choicefield_4(self):
f = ChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, '6')
def test_choicefield_callable(self):
choices = lambda: [('J', 'John'), ('P', 'Paul')]
f = ChoiceField(choices=choices)
self.assertEqual('J', f.clean('J'))
def test_choicefield_callable_may_evaluate_to_different_values(self):
choices = []
def choices_as_callable():
return choices
class ChoiceFieldForm(Form):
choicefield = ChoiceField(choices=choices_as_callable)
choices = [('J', 'John')]
form = ChoiceFieldForm()
self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices))
choices = [('P', 'Paul')]
form = ChoiceFieldForm()
self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices))
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, '2')
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
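# 'B' passes choice validation but int('B') raises, and that coercion failure
# is presumably re-reported using the generic invalid_choice message below.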
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, 'B')
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed(1, '1'))
self.assertFalse(f.has_changed('1', '1'))
def test_typedchoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedChoiceField(choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual(Decimal('1.2'), f.clean('2'))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3')
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertIsNone(f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertEqual(True, f.clean('true'))
self.assertEqual(False, f.clean('false'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual('<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" /><input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />', str(f))
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['6'])
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['1', '6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['1', '2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, ['B'])
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([Decimal('1.2')], f.clean(['2']))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3'])
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, '[email protected]')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, '[email protected]')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'", f.clean, 'fields.py')
assert fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py')
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path, match=r'^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
f = FilePathField(path=path, recursive=True, match=r'^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/1x1.bmp', '1x1.bmp'),
('/tests/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(['']))
self.assertIsNone(f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
# GenericIPAddressField #######################################################
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
# The edge cases of the IPv6 validation code are not deeply tested here;
# they are covered in the tests for django.utils.ipv6.
f = GenericIPAddressField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '256.125.1.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
# SlugField ###################################################################
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
# UUIDField ###################################################################
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
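    # A minimal illustrative sketch, assuming the same UUIDField as above: the
    # clean()/prepare_value() pair round-trips between uuid.UUID objects and
    # their hex-string form, so a bound form can redisplay a previously
    # cleaned value unchanged.
    #
    #   field = UUIDField()
    #   parsed = field.clean('550e8400e29b41d4a716446655440000')   # uuid.UUID instance
    #   rendered = field.prepare_value(parsed)                      # hex string again
    #   assert field.clean(rendered) == parsed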
| bsd-3-clause |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/universaldetector.py | 200 | 6664 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber  # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
            if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE or aBuf[:2] == codecs.BOM_BE:
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {
'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()
}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
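# A minimal usage sketch, assuming the standalone charade package layout
# (import path and file name below are placeholder assumptions): feed() is
# incremental and sets `done` once a BOM or a confident prober decides,
# close() finalizes the guess, which is then available in `result`.
#
#   from charade.universaldetector import UniversalDetector
#
#   detector = UniversalDetector()
#   with open('sample.bin', 'rb') as fp:
#       for chunk in iter(lambda: fp.read(4096), b''):
#           detector.feed(chunk)
#           if detector.done:
#               break
#   detector.close()
#   print(detector.result)   # e.g. {'encoding': 'UTF-8', 'confidence': 1.0}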
| mit |
Alwnikrotikz/l5rcm | dialogs/managedatapack.py | 3 | 5873 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2011 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PySide import QtCore, QtGui
import dal
class DataPackModel(QtCore.QAbstractTableModel):
def __init__(self, parent = None):
super(DataPackModel, self).__init__(parent)
self.items = []
self.headers = [self.tr('Name' ),
self.tr('Language'),
self.tr('Version'),
self.tr('Authors' ) ]
self.text_color = QtGui.QBrush(QtGui.QColor(0x15, 0x15, 0x15))
self.bg_color = [ QtGui.QBrush(QtGui.QColor(0xFF, 0xEB, 0x82)),
QtGui.QBrush(QtGui.QColor(0xEB, 0xFF, 0x82)) ]
self.item_size = QtCore.QSize(28, 28)
def rowCount(self, parent = QtCore.QModelIndex()):
return len(self.items)
def columnCount(self, parent = QtCore.QModelIndex()):
return len(self.headers)
def headerData(self, section, orientation, role = QtCore.Qt.ItemDataRole.DisplayRole):
if orientation != QtCore.Qt.Orientation.Horizontal:
return None
if role == QtCore.Qt.DisplayRole:
return self.headers[section]
return None
def data(self, index, role = QtCore.Qt.UserRole):
if not index.isValid() or index.row() >= len(self.items):
return None
item = self.items[index.row()]
if role == QtCore.Qt.DisplayRole:
if index.column() == 0:
return item.display_name
if index.column() == 1:
return item.language or self.tr("All")
if index.column() == 2:
return item.version or self.tr("N/A")
if index.column() == 3:
return ", ".join(item.authors) if ( item.authors is not None ) else ""
elif role == QtCore.Qt.ForegroundRole:
return self.text_color
elif role == QtCore.Qt.BackgroundRole:
return self.bg_color[ index.row() % 2 ]
elif role == QtCore.Qt.SizeHintRole:
return self.item_size
elif role == QtCore.Qt.UserRole:
return item
elif role == QtCore.Qt.CheckStateRole:
return self.__checkstate_role(item, index.column())
return None
def setData(self, index, value, role):
if not index.isValid():
return False
ret = False
item = self.items[index.row()]
self.dirty = True
if index.column() == 0 and role == QtCore.Qt.CheckStateRole:
item.active = (value == QtCore.Qt.Checked)
ret = True
else:
ret = super(DataPackModel, self).setData(index, value, role)
return ret
def flags(self, index):
if not index.isValid():
return QtCore.Qt.ItemIsDropEnabled
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
if index.column() == 0:
flags |= QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEditable
return flags
def __checkstate_role(self, item, column):
if column == 0:
return QtCore.Qt.Checked if item.active else QtCore.Qt.Unchecked
return None
def add_item(self, item):
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.items.append(item)
self.endInsertRows()
def clean(self):
self.beginResetModel()
self.items = []
self.endResetModel()
class ManageDataPackDlg(QtGui.QDialog):
def __init__(self, dstore, parent = None):
super(ManageDataPackDlg, self).__init__(parent)
self.dstore = dstore
self.build_ui ()
self.load_data()
def build_ui(self):
self.setWindowTitle(self.tr("Data Pack Manager"))
vbox = QtGui.QVBoxLayout(self)
grp = QtGui.QGroupBox (self.tr("Available data packs"))
self.view = QtGui.QTableView (self)
vbox2 = QtGui.QVBoxLayout(grp)
vbox2.addWidget(self.view)
bts = QtGui.QDialogButtonBox()
bts.addButton(self.tr("Discard"), QtGui.QDialogButtonBox.RejectRole)
bts.addButton(self.tr("Save"), QtGui.QDialogButtonBox.AcceptRole)
vbox.addWidget(grp)
vbox.addWidget(bts)
bts.accepted.connect( self.on_accept )
bts.rejected.connect( self.reject )
self.setMinimumSize( QtCore.QSize(440, 330) )
def load_data(self):
from copy import deepcopy
self.packs = deepcopy(self.dstore.packs)
model = DataPackModel(self)
for pack in self.packs:
model.add_item(pack)
self.view.setModel(model)
def on_accept(self):
self.dstore.packs = self.packs
self.accept()
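# A minimal wiring sketch, assuming a datastore object exposing a `packs` list
# whose items provide display_name/language/version/authors and a writable
# `active` flag (names inferred from the model code above); the persistence
# call is a hypothetical illustration, not part of this dialog:
#
#   app = QtGui.QApplication([])
#   dlg = ManageDataPackDlg(datastore)
#   if dlg.exec_() == QtGui.QDialog.Accepted:
#       datastore.save()   # hypothetical: the dialog only copies the edited list back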
| gpl-3.0 |
dbaxa/django | tests/model_regress/models.py | 281 | 2293 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
CHOICES = (
(1, 'first'),
(2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
status = models.IntegerField(blank=True, null=True, choices=CHOICES)
misc_data = models.CharField(max_length=100, blank=True)
article_text = models.TextField()
class Meta:
ordering = ('pub_date', 'headline')
# A utf-8 verbose name (Ångström's Articles) to test they are valid.
verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
def __str__(self):
return self.headline
class Movie(models.Model):
# Test models with non-default primary keys / AutoFields #5218
movie_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class Party(models.Model):
when = models.DateField(null=True)
class Event(models.Model):
when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Worker(models.Model):
department = models.ForeignKey(Department, models.CASCADE)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
name = models.CharField(max_length=7)
def __str__(self):
# Intentionally broken (invalid start byte in byte string).
return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
name = models.CharField(max_length=10, primary_key=True)
# Chained foreign keys with to_field produce incorrect query #18432
class Model1(models.Model):
pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
model1 = models.ForeignKey(Model1, models.CASCADE, unique=True, to_field='pkey')
class Model3(models.Model):
model2 = models.ForeignKey(Model2, models.CASCADE, unique=True, to_field='model1')
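# A minimal sketch of the kind of query the #18432 regression above covers,
# assuming the models are migrated; the chained to_field foreign keys should
# join on pkey/model1 rather than the implicit id columns:
#
#   m1 = Model1.objects.create(pkey=1000)
#   m2 = Model2.objects.create(model1=m1)
#   Model3.objects.create(model2=m2)
#   assert Model3.objects.filter(model2__model1__pkey=1000).exists()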
| bsd-3-clause |
brianrock/brianrock-ringo | handlers/poll.py | 1 | 7906 | # Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Needed to avoid ambiguity in imports
from __future__ import absolute_import
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api.labs import taskqueue
import logging
import os.path
import yaml
import time
import random
import re
import oauth
import buzz
import web.helper
import models.tokens
import models.board
OAUTH_CONFIG = yaml.load(open('oauth.yaml').read())
OAUTH_CONSUMER_KEY = OAUTH_CONFIG['oauth_consumer_key']
OAUTH_CONSUMER_SECRET = OAUTH_CONFIG['oauth_consumer_secret']
OAUTH_TOKEN_KEY = OAUTH_CONFIG['oauth_token_key']
OAUTH_TOKEN_SECRET = OAUTH_CONFIG['oauth_token_secret']
PRIORITY_PROFILES = yaml.load(open('polling.yaml').read())
BUZZ_BINGO_ID = '103831860836738334913'
class PollHandler(webapp.RequestHandler):
@property
def client(self):
if not hasattr(self, '_client') or not self._client:
access_token = oauth.OAuthToken(OAUTH_TOKEN_KEY, OAUTH_TOKEN_SECRET)
self._client = buzz.Client()
self._client.build_oauth_consumer(
OAUTH_CONSUMER_KEY, OAUTH_CONSUMER_SECRET
)
self._client.oauth_access_token = access_token
self._client.oauth_scopes.append(buzz.FULL_ACCESS_SCOPE)
return self._client
@property
def combined_results(self):
if not hasattr(self, '_combined_results') or not self._combined_results:
self._combined_results = []
try:
# Ignore the Buzz Bingo game itself
for post in self.client.posts(type_id='@consumption'):
if post.actor.id != BUZZ_BINGO_ID:
self._combined_results.append(post)
for post in self.client.search(query="buzzbingo"):
if post.actor.id != BUZZ_BINGO_ID:
self._combined_results.append(post)
except buzz.RetrieveError, e:
logging.warning(str(e))
logging.info('%d posts will be scored.' % len(self._combined_results))
return self._combined_results
def get(self):
cron = False
if self.request.headers.get('X-AppEngine-Cron') == 'true':
cron = True
elif self.request.headers.get('Referer') and \
self.request.headers.get('Referer').find('/_ah/admin/cron') != -1:
cron = True
if cron:
try:
result_task = taskqueue.Task(url='/worker/poll/')
result_task.add()
logging.info('Polling task enqueued...')
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError), e:
logging.error(str(e))
result_task = None
template_values = {
'http_get': True,
'message': None
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'poll.html'
)
self.response.out.write(template.render(path, template_values))
def scan_post(self, post_id):
logging.info('Scanning post: %s' % post_id)
topics_found = set([])
players = set([])
nonexistent_players = set([])
ignored_players = set([])
scoring_players = set([])
post = self.client.post(post_id).data
if post.actor.id == BUZZ_BINGO_ID:
return None
post_uri = post.uri
comments = post.comments()
retrieved_comments = []
post_content = post.content.lower()
post_content = re.sub('<br />|\\r|\\n', ' ', post_content)
# Avoid false positive
post_content = re.sub('buzz ?bingo', 'BUZZBINGO', post_content)
if post_content.find('BUZZBINGO') != -1:
players.add(post.actor.id)
for topic in models.board.TOPIC_LIST:
if post_content.find(topic.lower()) != -1:
topics_found.add(topic)
if post_content.find('taco'.lower()) != -1:
topics_found.add('taco')
for comment in comments:
# Need to avoid making unnecessary HTTP requests
retrieved_comments.append(comment)
comment_content = comment.content.lower()
comment_content = re.sub('<br />|\\r|\\n', ' ', comment_content)
# Avoid false positive
comment_content = re.sub('buzz ?bingo', 'BUZZBINGO', comment_content)
if comment_content.find('BUZZBINGO') != -1:
players.add(comment.actor.id)
for topic in models.board.TOPIC_LIST:
if comment_content.find(topic.lower()) != -1:
topics_found.add(topic)
if comment_content.find('taco'.lower()) != -1:
topics_found.add('taco')
for player_id in players:
player = models.player.Player.get_by_key_name(player_id)
if player:
intersection = [
topic for topic in player.topics if topic in topics_found
]
if player.has_post_scored(post_id):
logging.info("Player already scored this.")
# Sometimes a bingo gets missed by retrying a transaction
db.run_in_transaction(player.verify_bingo)
elif intersection:
scoring_players.add(player)
scoring_topic = random.choice(intersection)
db.run_in_transaction(
player.score_post, post, scoring_topic
)
# Can't be run in the transaction, hopefully there won't be
# any nasty race conditions
player.award_leader_badge()
else:
ignored_players.add(player)
else:
nonexistent_players.add(player_id)
# Lots of logging, because this turns out to be tricky to get right.
topics_log_message = 'Topics found:\n'
for topic in topics_found:
topics_log_message += topic + '\n'
logging.info(topics_log_message)
scoring_log_message = 'Players scoring:\n'
for player in scoring_players:
scoring_log_message += '%s\n' % repr(player)
logging.info(scoring_log_message)
ignored_log_message = 'Players ignored and not scoring:\n'
for player in ignored_players:
ignored_log_message += '%s\n' % repr(player)
logging.info(ignored_log_message)
nonexistent_log_message = 'Players who might score if they signed up:\n'
for player_id in nonexistent_players:
nonexistent_log_message += '%s\n' % player_id
logging.info(nonexistent_log_message)
def post(self):
post_id = self.request.get('post_id')
message = ''
if post_id:
self.scan_post(post_id)
else:
for result in self.combined_results:
try:
if result.actor.profile_name in PRIORITY_PROFILES:
# Give priority access to profiles used in any demo.
countdown = 0
logging.info('Priority scan: %s' % result.id)
else:
# One second delay for everyone else, which should be fine.
countdown = 1
result_task = taskqueue.Task(
name="%s-%d" % (result.id[25:], int(time.time())),
params={
'post_id': result.id
},
url='/worker/poll/',
countdown=countdown
)
result_task.add()
logging.info('Scanning task enqueued: %s' % result.id)
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError), e:
logging.error(str(e))
result_task = None
message = 'Retrieved %d posts.' % len(self.combined_results)
template_values = {
'http_get': False,
'message': message
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'poll.html'
)
self.response.out.write(template.render(path, template_values))
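# A standalone sketch of the false-positive guard used in scan_post() above,
# with a made-up sample sentence: lower-casing plus the rewrite to the
# 'BUZZBINGO' sentinel apparently keeps topic keywords from matching inside
# the trigger phrase itself while still flagging the claim.
#
#   content = 'Buzz Bingo! someone said buzzbingo and taco'.lower()
#   content = re.sub('buzz ?bingo', 'BUZZBINGO', content)
#   assert content.find('BUZZBINGO') != -1   # the claim is detected
#   assert content.find('taco') != -1        # topics still match normally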
| apache-2.0 |
crowdata/crowdata | crowdataapp/migrations/0020_auto__add_field_documentset_header_image.py | 2 | 12999 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DocumentSet.header_image'
db.add_column(u'crowdataapp_documentset', 'header_image',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DocumentSet.header_image'
db.delete_column(u'crowdataapp_documentset', 'header_image')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'crowdataapp.document': {
'Meta': {'object_name': 'Document'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['crowdataapp.DocumentSet']"}),
'entries_threshold_override': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': "'512'"}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'crowdataapp.documentset': {
'Meta': {'object_name': 'DocumentSet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entries_threshold': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'head_html': ('django.db.models.fields.TextField', [], {'default': '\'<!-- <script> or <link rel="stylesheet"> tags go here -->\'', 'null': 'True'}),
'header_image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'template_function': ('django.db.models.fields.TextField', [], {'default': "'// Javascript function to insert the document into the DOM.\\n// Receives the URL of the document as its only parameter.\\n// Must be called insertDocument\\n// JQuery is available\\n// resulting element should be inserted into div#document-viewer-container\\nfunction insertDocument(document_url) {\\n}\\n'"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetfieldentry': {
'Meta': {'object_name': 'DocumentSetFieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetFormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'crowdataapp.documentsetform': {
'Meta': {'object_name': 'DocumentSetForm'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form'", 'unique': 'True', 'to': u"orm['crowdataapp.DocumentSet']"}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'crowdataapp.documentsetformentry': {
'Meta': {'object_name': 'DocumentSetFormEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'form_entries'", 'null': 'True', 'to': u"orm['crowdataapp.Document']"}),
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetformfield': {
'Meta': {'object_name': 'DocumentSetFormField'},
'autocomplete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'verify': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'crowdataapp.documentsetrankingdefinition': {
'Meta': {'object_name': 'DocumentSetRankingDefinition'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rankings'", 'to': u"orm['crowdataapp.DocumentSet']"}),
'grouping_function': ('django.db.models.fields.CharField', [], {'default': "'SUM'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'label_fields'", 'to': u"orm['crowdataapp.DocumentSetFormField']"}),
'magnitude_field': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'magnitude_fields'", 'null': 'True', 'to': u"orm['crowdataapp.DocumentSetFormField']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'sort_order': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'crowdataapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'show_in_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['crowdataapp'] | mit |
scenarios/tensorflow | tensorflow/python/ops/logging_ops.py | 10 | 13502 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and Summary Operations."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.deprecation import deprecated
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Print(input_, data, message=None, first_n=None, summarize=None,
name=None):
"""Prints a list of tensors.
This is an identity op with the side effect of printing `data` when
evaluating.
Args:
input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated.
message: A string, prefix of the error message.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: A name for the operation (optional).
Returns:
Same tensor as `input_`.
"""
return gen_logging_ops._print(input_, data, message, first_n, summarize, name)
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
return list(grad) + [None] * (len(op.inputs) - 1)
def _Collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
ops.add_to_collection(key, val)
def histogram_summary(tag, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
This ops is deprecated. Please switch to tf.summary.histogram.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
val = gen_logging_ops._histogram_summary(
tag=tag, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.image. Note that "
"tf.summary.image uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, the max_images "
"argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with images.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
* If `max_images` is greater than 1, the summary value tags are
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
val = gen_logging_ops._image_summary(
tag=tag, tensor=tensor, max_images=max_images, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.audio. Note that "
"tf.summary.audio uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def audio_summary(tag,
tensor,
sample_rate,
max_outputs=3,
collections=None,
name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
This op is deprecated. Please switch to tf.summary.audio.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
sample_rate = ops.convert_to_tensor(sample_rate, dtype=dtypes.float32,
name="sample_rate")
val = gen_logging_ops._audio_summary_v2(tag=tag,
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op is deprecated. Please switch to tf.summary.merge, which has identical
behavior.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
val = gen_logging_ops._merge_summary(inputs=inputs, name=name)
_Collect(val, collections, [])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
This op is deprecated. Please switch to tf.summary.merge_all, which has
identical behavior.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
if not summary_ops:
return None
else:
return merge_summary(summary_ops)
def get_summary_op():
"""Returns a single Summary op that would run all summaries.
Either existing one from `SUMMARY_OP` collection or merges all existing
summaries.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is not None:
if summary_op:
summary_op = summary_op[0]
else:
summary_op = None
if summary_op is None:
summary_op = merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
return summary_op
def scalar_summary(tags, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with scalar values.
This ops is deprecated. Please switch to tf.summary.scalar.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The input `tags` and `values` must have the same shape. The generated
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric Tensor. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
val = gen_logging_ops._scalar_summary(tags=tags, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
ops.NotDifferentiable("HistogramAccumulatorSummary")
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
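# A minimal usage sketch for the deprecated helpers above, assuming a TF1-style
# graph and session managed elsewhere (`loss` and the writer are placeholders);
# tf.summary.* is the replacement the deprecation notices point to:
#
#   scalar_summary('loss', loss)
#   histogram_summary('loss_hist', loss)
#   summary_op = merge_all_summaries()   # or get_summary_op()
#   # sess.run(summary_op) yields a serialized Summary protocol buffer that a
#   # summary writer can pass to add_summary() for TensorBoard.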
| apache-2.0 |
lopopolo/hyperbola | hyperbola/urls.py | 1 | 1226 | """
hyperbola URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.urls import include, path
from .blog import urls as blog
from .contact import urls as contact
from .core.views import not_found
from .frontpage import urls as frontpage
from .lifestream import urls as lifestream
from .shortlinks import urls as shortlinks
urlpatterns = [
path("", include(frontpage)),
path("w/", include(blog)),
path("contact/", include(contact)),
path("lifestream/", include(lifestream)),
path("s/", include(shortlinks)),
path("404.html", not_found),
] + settings.ENVIRONMENT.additional_urls
| mit |
SergeyPirogov/selene | tests/integration/inner_selement_waiting_search_on_actions_like_click_test.py | 1 | 4742 | import pytest
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selene import config
from selene.common.none_object import NoneObject
from selene.driver import SeleneDriver
from tests.acceptance.helpers.helper import get_test_driver
from tests.integration.helpers.givenpage import GivenPage
__author__ = 'yashaka'
driver = NoneObject('driver') # type: SeleneDriver
GIVEN_PAGE = NoneObject('GivenPage') # type: GivenPage
WHEN = GIVEN_PAGE # type: GivenPage
original_timeout = config.timeout
def setup_module(m):
global driver
driver = SeleneDriver.wrap(get_test_driver())
global GIVEN_PAGE
GIVEN_PAGE = GivenPage(driver)
global WHEN
WHEN = GIVEN_PAGE
def teardown_module(m):
driver.quit()
def setup_function(fn):
    global original_timeout
    config.timeout = original_timeout  # reset any timeout shrunk by a previous test
def test_waits_for_inner_visibility():
GIVEN_PAGE\
.opened_with_body(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''')\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
250)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_for_inner_presence_in_dom_and_visibility():
GIVEN_PAGE.opened_with_body(
'''
<p>
<h2 id="second">Heading 2</h2>
</p>''')
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
250)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_first_for_inner_presence_in_dom_then_visibility():
GIVEN_PAGE.opened_with_body(
'''
<p>
<h2 id="second">Heading 2</h2>
</p>''')
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
250)\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
500)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_first_for_parent_in_dom_then_inner_in_dom_then_visibility():
GIVEN_PAGE.opened_empty()
WHEN.load_body_with_timeout(
'''
<p>
<h2 id="second">Heading 2</h2>
</p>''',
250)
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
500)\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
750)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_first_for_parent_in_dom_then_visible_then_inner_in_dom_then_visibility():
GIVEN_PAGE.opened_empty()
WHEN.load_body_with_timeout(
'''
<p style="display:none">
<h2 id="second">Heading 2</h2>
</p>''',
250)\
.execute_script_with_timeout(
'document.getElementsByTagName("p")[0].style = "display:block";',
500)
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
750)\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
1000)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
# todo: there should be each such test method for each "passing" test from above...
def test_fails_on_timeout_during_waiting_for_inner_visibility():
config.timeout = 0.25
GIVEN_PAGE\
.opened_with_body(
'''
<p>
<a href='#second' style='display:none'>go to Heading 2</a>
<h2 id='second'>Heading 2</h2>
</p>''')\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
500)
with pytest.raises(TimeoutException):
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is False
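# A condensed sketch of the waiting pattern the tests above exercise, assuming
# the same GivenPage helper: content is injected after a delay and the click
# succeeds only because Selene keeps retrying the search, bounded by
# config.timeout.
#
#   config.timeout = 4
#   GIVEN_PAGE.opened_empty()
#   WHEN.load_body_with_timeout('<a href="#second">go</a>', 250)
#   driver.element('a').click()   # waits for presence, then visibility, then clicks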
| mit |
mitre/multiscanner | multiscanner/tests/test_parse_reports.py | 2 | 1531 | # -*- coding: utf-8 -*-
import multiscanner
def test_valid_reports_string():
reportlist = [([('file', 'result')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=False)
assert r == '{"file":{"Test":"result"}}'
def test_valid_reports_python():
reportlist = [([('file', 'result')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=True)
assert r == {"file": {"Test": "result"}}
def test_valid_utf8_string():
reportlist = [([('file', '안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=False)
assert r == u'{"file":{"Test":"안녕하세요"}}'
def test_valid_utf8_python():
reportlist = [([('file', '안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=True)
assert r == {"file": {"Test": "안녕하세요"}}
def test_invalid_utf8_string():
reportlist = [([('file', '\x97안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=False)
assert r == u'{"file":{"Test":"\x97안녕하세요"}}' or r == u'{"file":{"Test":"\ufffd안녕하세요"}}'
def test_invalid_utf8_python():
reportlist = [([('file', '\x97안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=True)
assert r == {"file": {"Test": "\x97안녕하세요"}} or r == {"file": {"Test": u"\ufffd안녕하세요"}}
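# A minimal sketch of the two output modes exercised above: python=True yields
# a nested dict keyed by file and then module name, python=False yields the
# same structure as a compact JSON string (the 'Test' module name mirrors the
# metadata dicts used in these tests).
#
#   reports = [([('file', 'result')], {'Name': 'Test', 'Type': 'Test'})]
#   as_dict = multiscanner.parse_reports(reports, python=True)    # {'file': {'Test': 'result'}}
#   as_json = multiscanner.parse_reports(reports, python=False)   # '{"file":{"Test":"result"}}'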
| mpl-2.0 |
bsmrstu-warriors/Moytri---The-Drone-Aider | Lib/lib2to3/pgen2/driver.py | 98 | 4694 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import StringIO
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError, e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
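# A minimal usage sketch, assuming the lib2to3 package layout this module ships
# in (pygram preloads the pickled grammar via load_grammar):
#
#   from lib2to3 import pygram, pytree
#   from lib2to3.pgen2 import driver as pgen_driver
#
#   d = pgen_driver.Driver(pygram.python_grammar, convert=pytree.convert)
#   tree = d.parse_string("print 'hello'\n")   # Python 2 source, as this codebase expects
#   print str(tree)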
| gpl-3.0 |
ddurst/zamboni | manage.py | 17 | 2113 | #!/usr/bin/env python
import logging
import os
import sys
from django.core.management import execute_from_command_line
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
if len(sys.argv) > 1 and sys.argv[1] == 'test':
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings_test'
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkt.settings')
# waffle and mkt form an import cycle because mkt patches waffle and
# waffle loads the user model, so we have to make sure mkt gets
# imported before anything else imports waffle.
import mkt # noqa
import session_csrf # noqa
session_csrf.monkeypatch()
# Fix jinja's Markup class to not crash when localizers give us bad format
# strings.
from jinja2 import Markup # noqa
mod = Markup.__mod__
trans_log = logging.getLogger('z.trans')
# Load this early so that anything else you import will use these log settings.
# Mostly to shut Raven the hell up.
from lib.log_settings_base import log_configure # noqa
log_configure()
def new(self, arg):
try:
return mod(self, arg)
except Exception:
trans_log.error(unicode(self))
return ''
Markup.__mod__ = new
# Import for side-effect: configures our logging handlers.
# pylint: disable-msg=W0611
from lib.utils import update_csp, validate_modules, validate_settings # noqa
update_csp()
validate_modules()
validate_settings()
import django.conf # noqa
newrelic_ini = getattr(django.conf.settings, 'NEWRELIC_INI', None)
load_newrelic = False
# Monkey patches DRF to not use fqdn urls.
from mkt.api.patch import patch # noqa
patch()
if newrelic_ini:
import newrelic.agent # noqa
try:
newrelic.agent.initialize(newrelic_ini)
load_newrelic = True
except:
startup_logger = logging.getLogger('z.startup')
startup_logger.exception('Failed to load new relic config.')
# Alter zamboni to run on a particular port as per the
# marketplace docs, unless overridden.
from django.core.management.commands import runserver # noqa
runserver.DEFAULT_PORT = 2600
if __name__ == '__main__':
execute_from_command_line(sys.argv)
| bsd-3-clause |
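A small standalone sketch of the Markup.__mod__ patch applied in the manage.py above, showing why it exists: a localizer format string with a bad placeholder is logged and rendered as an empty string instead of raising. The logger name and the sample strings are illustrative only.

import logging

from jinja2 import Markup  # same import the script above relies on

trans_log = logging.getLogger('z.trans')
orig_mod = Markup.__mod__

def safe_mod(self, arg):
    # Mirrors the patch in manage.py: swallow formatting errors and log them.
    try:
        return orig_mod(self, arg)
    except Exception:
        trans_log.error(str(self))
        return ''

Markup.__mod__ = safe_mod

print(Markup('%(name)s logged in') % {'name': 'alice'})  # alice logged in
print(Markup('%(nmae)s logged in') % {'name': 'alice'})  # '' instead of KeyError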
ldesousa/pywps-4-demo | docs/conf.py | 3 | 10208 | # -*- coding: utf-8 -*-
#
# PyWPS-Flask documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 11 21:27:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyWPS-Flask'
copyright = u'2016, PyWPS Development Team'
author = u'PyWPS Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'4.2'
# The full version, including alpha/beta/rc tags.
release = u'4.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
#html_logo = '_static/pywps.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# alabaster settings
html_theme_options = {
'show_related': True,
'travis_button': True,
'github_banner': True,
'github_user': 'geopython',
'github_repo': 'pywps-flask',
'github_button': True,
'logo': 'pywps.png',
'logo_name': False
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# alabaster settings
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyWPS-Flaskdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyWPS-Flask.tex', u'PyWPS-Flask Documentation',
u'PyWPS Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pywps-flask', u'PyWPS-Flask Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyWPS-Flask', u'PyWPS-Flask Documentation',
author, 'PyWPS-Flask', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
filename = info['module'].replace('.', '/')
return "http://github.com/geopython/pywps-flask/blob/master/%s.py" % filename
| mit |
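For illustration, this is what the linkcode_resolve hook defined at the end of the Sphinx conf.py above would return when the build resolves source links; the module path used here is only an example.

# Illustrative only -- Sphinx calls linkcode_resolve() itself during the build.
print(linkcode_resolve('py', {'module': 'processes.ultimate_question'}))
# http://github.com/geopython/pywps-flask/blob/master/processes/ultimate_question.py
print(linkcode_resolve('cpp', {'module': 'anything'}))  # None: non-Python domain
print(linkcode_resolve('py', {'module': ''}))           # None: no module info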
rjschwei/WALinuxAgent | azurelinuxagent/common/utils/restutil.py | 1 | 18482 | # Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import re
import threading
import time
import traceback
import socket
import struct
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError
from azurelinuxagent.common.future import httpclient, urlparse, ustr
from azurelinuxagent.common.version import PY_VERSION_MAJOR, AGENT_NAME, GOAL_STATE_AGENT_VERSION
SECURE_WARNING_EMITTED = False
DEFAULT_RETRIES = 6
DELAY_IN_SECONDS = 1
THROTTLE_RETRIES = 25
THROTTLE_DELAY_IN_SECONDS = 1
REDACTED_TEXT = "<SAS_SIGNATURE>"
SAS_TOKEN_RETRIEVAL_REGEX = re.compile(r'^(https?://[a-zA-Z0-9.].*sig=)([a-zA-Z0-9%-]*)(.*)$')
RETRY_CODES = [
httpclient.RESET_CONTENT,
httpclient.PARTIAL_CONTENT,
httpclient.FORBIDDEN,
httpclient.INTERNAL_SERVER_ERROR,
httpclient.NOT_IMPLEMENTED,
httpclient.BAD_GATEWAY,
httpclient.SERVICE_UNAVAILABLE,
httpclient.GATEWAY_TIMEOUT,
httpclient.INSUFFICIENT_STORAGE,
429, # Request Rate Limit Exceeded
]
RESOURCE_GONE_CODES = [
httpclient.GONE
]
OK_CODES = [
httpclient.OK,
httpclient.CREATED,
httpclient.ACCEPTED
]
NOT_MODIFIED_CODES = [
httpclient.NOT_MODIFIED
]
HOSTPLUGIN_UPSTREAM_FAILURE_CODES = [
502
]
THROTTLE_CODES = [
httpclient.FORBIDDEN,
httpclient.SERVICE_UNAVAILABLE,
429, # Request Rate Limit Exceeded
]
RETRY_EXCEPTIONS = [
httpclient.NotConnected,
httpclient.IncompleteRead,
httpclient.ImproperConnectionState,
httpclient.BadStatusLine
]
# http://www.gnu.org/software/wget/manual/html_node/Proxies.html
HTTP_PROXY_ENV = "http_proxy"
HTTPS_PROXY_ENV = "https_proxy"
NO_PROXY_ENV = "no_proxy"
HTTP_USER_AGENT = "{0}/{1}".format(AGENT_NAME, GOAL_STATE_AGENT_VERSION)
HTTP_USER_AGENT_HEALTH = "{0}+health".format(HTTP_USER_AGENT)
INVALID_CONTAINER_CONFIGURATION = "InvalidContainerConfiguration"
REQUEST_ROLE_CONFIG_FILE_NOT_FOUND = "RequestRoleConfigFileNotFound"
KNOWN_WIRESERVER_IP = '168.63.129.16'
HOST_PLUGIN_PORT = 32526
class IOErrorCounter(object):
_lock = threading.RLock()
_protocol_endpoint = KNOWN_WIRESERVER_IP
_counts = {"hostplugin":0, "protocol":0, "other":0}
@staticmethod
def increment(host=None, port=None):
with IOErrorCounter._lock:
if host == IOErrorCounter._protocol_endpoint:
if port == HOST_PLUGIN_PORT:
IOErrorCounter._counts["hostplugin"] += 1
else:
IOErrorCounter._counts["protocol"] += 1
else:
IOErrorCounter._counts["other"] += 1
@staticmethod
def get_and_reset():
with IOErrorCounter._lock:
counts = IOErrorCounter._counts.copy()
IOErrorCounter.reset()
return counts
@staticmethod
def reset():
with IOErrorCounter._lock:
IOErrorCounter._counts = {"hostplugin":0, "protocol":0, "other":0}
@staticmethod
def set_protocol_endpoint(endpoint=KNOWN_WIRESERVER_IP):
IOErrorCounter._protocol_endpoint = endpoint
def _compute_delay(retry_attempt=1, delay=DELAY_IN_SECONDS):
fib = (1, 1)
for n in range(retry_attempt):
fib = (fib[1], fib[0]+fib[1])
return delay*fib[1]
def _is_retry_status(status, retry_codes=RETRY_CODES):
return status in retry_codes
def _is_retry_exception(e):
return len([x for x in RETRY_EXCEPTIONS if isinstance(e, x)]) > 0
def _is_throttle_status(status):
return status in THROTTLE_CODES
def _parse_url(url):
"""
    Parse a URL into its components: hostname, port, whether it is secure (HTTPS), and the relative URI.
:rtype: string, int, bool, string
"""
o = urlparse(url)
rel_uri = o.path
if o.fragment:
rel_uri = "{0}#{1}".format(rel_uri, o.fragment)
if o.query:
rel_uri = "{0}?{1}".format(rel_uri, o.query)
secure = False
if o.scheme.lower() == "https":
secure = True
return o.hostname, o.port, secure, rel_uri
def is_valid_cidr(string_network):
"""
    Very simple check of the CIDR format of an entry in the no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def get_no_proxy():
no_proxy = os.environ.get(NO_PROXY_ENV) or os.environ.get(NO_PROXY_ENV.upper())
if no_proxy:
no_proxy = [host for host in no_proxy.replace(' ', '').split(',') if host]
# no_proxy in the proxies argument takes precedence
return no_proxy
def bypass_proxy(host):
no_proxy = get_no_proxy()
if no_proxy:
if is_ipv4_address(host):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(host, proxy_ip):
return True
elif host == proxy_ip:
                    # The no_proxy entry was given as a plain IP address (not
                    # CIDR notation) and it matches the host exactly.
return True
else:
for proxy_domain in no_proxy:
if host.lower().endswith(proxy_domain.lower()):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
return False
def _get_http_proxy(secure=False):
# Prefer the configuration settings over environment variables
host = conf.get_httpproxy_host()
port = None
if not host is None:
port = conf.get_httpproxy_port()
else:
http_proxy_env = HTTPS_PROXY_ENV if secure else HTTP_PROXY_ENV
http_proxy_url = None
for v in [http_proxy_env, http_proxy_env.upper()]:
if v in os.environ:
http_proxy_url = os.environ[v]
break
if not http_proxy_url is None:
host, port, _, _ = _parse_url(http_proxy_url)
return host, port
def redact_sas_tokens_in_urls(url):
return SAS_TOKEN_RETRIEVAL_REGEX.sub(r"\1" + REDACTED_TEXT + r"\3", url)
def _http_request(method, host, rel_uri, port=None, data=None, secure=False,
headers=None, proxy_host=None, proxy_port=None):
headers = {} if headers is None else headers
headers['Connection'] = 'close'
use_proxy = proxy_host is not None and proxy_port is not None
if port is None:
port = 443 if secure else 80
if 'User-Agent' not in headers:
headers['User-Agent'] = HTTP_USER_AGENT
if use_proxy:
conn_host, conn_port = proxy_host, proxy_port
scheme = "https" if secure else "http"
url = "{0}://{1}:{2}{3}".format(scheme, host, port, rel_uri)
else:
conn_host, conn_port = host, port
url = rel_uri
if secure:
conn = httpclient.HTTPSConnection(conn_host,
conn_port,
timeout=10)
if use_proxy:
conn.set_tunnel(host, port)
else:
conn = httpclient.HTTPConnection(conn_host,
conn_port,
timeout=10)
logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]",
method,
redact_sas_tokens_in_urls(url),
data,
headers)
conn.request(method=method, url=url, body=data, headers=headers)
return conn.getresponse()
def http_request(method,
url, data, headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
global SECURE_WARNING_EMITTED
host, port, secure, rel_uri = _parse_url(url)
# Use the HTTP(S) proxy
proxy_host, proxy_port = (None, None)
if use_proxy and not bypass_proxy(host):
proxy_host, proxy_port = _get_http_proxy(secure=secure)
if proxy_host or proxy_port:
logger.verbose("HTTP proxy: [{0}:{1}]", proxy_host, proxy_port)
# If httplib module is not built with ssl support,
# fallback to HTTP if allowed
if secure and not hasattr(httpclient, "HTTPSConnection"):
if not conf.get_allow_http():
raise HttpError("HTTPS is unavailable and required")
secure = False
if not SECURE_WARNING_EMITTED:
logger.warn("Python does not include SSL support")
SECURE_WARNING_EMITTED = True
# If httplib module doesn't support HTTPS tunnelling,
# fallback to HTTP if allowed
if secure and \
proxy_host is not None and \
proxy_port is not None \
and not hasattr(httpclient.HTTPSConnection, "set_tunnel"):
if not conf.get_allow_http():
raise HttpError("HTTPS tunnelling is unavailable and required")
secure = False
if not SECURE_WARNING_EMITTED:
logger.warn("Python does not support HTTPS tunnelling")
SECURE_WARNING_EMITTED = True
msg = ''
attempt = 0
delay = 0
was_throttled = False
while attempt < max_retry:
if attempt > 0:
# Compute the request delay
# -- Use a fixed delay if the server ever rate-throttles the request
# (with a safe, minimum number of retry attempts)
# -- Otherwise, compute a delay that is the product of the next
# item in the Fibonacci series and the initial delay value
delay = THROTTLE_DELAY_IN_SECONDS \
if was_throttled \
else _compute_delay(retry_attempt=attempt,
delay=retry_delay)
logger.verbose("[HTTP Retry] "
"Attempt {0} of {1} will delay {2} seconds: {3}",
attempt+1,
max_retry,
delay,
msg)
time.sleep(delay)
attempt += 1
try:
resp = _http_request(method,
host,
rel_uri,
port=port,
data=data,
secure=secure,
headers=headers,
proxy_host=proxy_host,
proxy_port=proxy_port)
logger.verbose("[HTTP Response] Status Code {0}", resp.status)
if request_failed(resp):
if _is_retry_status(resp.status, retry_codes=retry_codes):
msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format(method, url, resp.status)
# Note if throttled and ensure a safe, minimum number of
# retry attempts
if _is_throttle_status(resp.status):
was_throttled = True
max_retry = max(max_retry, THROTTLE_RETRIES)
continue
# If we got a 410 (resource gone) for any reason, raise an exception. The caller will handle it by
# forcing a goal state refresh and retrying the call.
if resp.status in RESOURCE_GONE_CODES:
response_error = read_response_error(resp)
raise ResourceGoneError(response_error)
# If we got a 400 (bad request) because the container id is invalid, it could indicate a stale goal
# state. The caller will handle this exception by forcing a goal state refresh and retrying the call.
if resp.status == httpclient.BAD_REQUEST:
response_error = read_response_error(resp)
if INVALID_CONTAINER_CONFIGURATION in response_error:
raise InvalidContainerError(response_error)
return resp
except httpclient.HTTPException as e:
clean_url = redact_sas_tokens_in_urls(url)
msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format(method, clean_url, e)
if _is_retry_exception(e):
continue
break
except IOError as e:
IOErrorCounter.increment(host=host, port=port)
clean_url = redact_sas_tokens_in_urls(url)
msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format(method, clean_url, e)
continue
raise HttpError("{0} -- {1} attempts made".format(msg, attempt))
def http_get(url,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("GET",
url, None, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_head(url,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("HEAD",
url, None, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_post(url,
data,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("POST",
url, data, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_put(url,
data,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("PUT",
url, data, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_delete(url,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("DELETE",
url, None, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def request_failed(resp, ok_codes=OK_CODES):
return not request_succeeded(resp, ok_codes=ok_codes)
def request_succeeded(resp, ok_codes=OK_CODES):
return resp is not None and resp.status in ok_codes
def request_not_modified(resp):
return resp is not None and resp.status in NOT_MODIFIED_CODES
def request_failed_at_hostplugin(resp, upstream_failure_codes=HOSTPLUGIN_UPSTREAM_FAILURE_CODES):
"""
Host plugin will return 502 for any upstream issue, so a failure is any 5xx except 502
"""
return resp is not None and resp.status >= 500 and resp.status not in upstream_failure_codes
def read_response_error(resp):
result = ''
if resp is not None:
try:
result = "[HTTP Failed] [{0}: {1}] {2}".format(
resp.status,
resp.reason,
resp.read())
# this result string is passed upstream to several methods
# which do a raise HttpError() or a format() of some kind;
# as a result it cannot have any unicode characters
if PY_VERSION_MAJOR < 3:
result = ustr(result, encoding='ascii', errors='ignore')
else:
result = result\
.encode(encoding='ascii', errors='ignore')\
.decode(encoding='ascii', errors='ignore')
result = textutil.replace_non_ascii(result)
except Exception:
logger.warn(traceback.format_exc())
return result
| apache-2.0 |
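A few self-contained spot checks of the helper functions in the restutil module above: the Fibonacci retry back-off, the CIDR/netmask helpers, and SAS-token redaction. The import assumes the agent package is on the Python path, and the sample URL is made up.

from azurelinuxagent.common.utils import restutil

# Retry back-off: delay = DELAY_IN_SECONDS * next Fibonacci number.
print([restutil._compute_delay(retry_attempt=n) for n in range(1, 5)])  # [2, 3, 5, 8]

print(restutil.dotted_netmask(24))                                   # 255.255.255.0
print(restutil.address_in_network('192.168.1.1', '192.168.1.0/24'))  # True
print(restutil.is_valid_cidr('10.0.0.0/33'))                         # False

# SAS signatures are scrubbed before URLs are logged (URL below is made up):
url = 'https://example.blob.core.windows.net/c/b?sv=2018&sig=abc123&se=2030'
print(restutil.redact_sas_tokens_in_urls(url))
# https://example.blob.core.windows.net/c/b?sv=2018&sig=<SAS_SIGNATURE>&se=2030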
gregdek/ansible | lib/ansible/modules/packaging/os/apk.py | 82 | 11119 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <[email protected]>)
# and apt (Matthew Williams <[email protected]>) modules.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
version_added: "2.0"
options:
available:
description:
- During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
if the currently installed package is no longer available from any repository.
type: bool
default: 'no'
version_added: "2.4"
name:
description:
- A package name, like C(foo), or multiple packages, like C(foo, bar).
repository:
description:
- A package repository or multiple repositories.
Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
version_added: "2.4"
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
      - Update repository indexes. Can be run with other steps or on its own.
type: bool
default: 'no'
upgrade:
description:
- Upgrade all installed packages to their latest version.
type: bool
default: 'no'
notes:
- '"name" and "upgrade" are mutually exclusive.'
  - When used with a `loop:`, each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk:
name: foo
update_cache: yes
# Update repositories and install "foo" and "bar" packages
- apk:
name: foo,bar
update_cache: yes
# Remove "foo" package
- apk:
name: foo
state: absent
# Remove "foo" and "bar" packages
- apk:
name: foo,bar
state: absent
# Install the package "foo"
- apk:
name: foo
state: present
# Install the packages "foo" and "bar"
- apk:
name: foo,bar
state: present
# Update repositories and update package "foo" to latest version
- apk:
name: foo
state: latest
update_cache: yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk:
name: foo,bar
state: latest
update_cache: yes
# Update all installed packages to the latest versions
- apk:
upgrade: yes
# Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
- apk:
available: yes
upgrade: yes
# Update repositories as a separate step
- apk:
update_cache: yes
# Install package from a specific repository
- apk:
name: foo
state: latest
update_cache: yes
repository: http://dl-3.alpinelinux.org/alpine/edge/main
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when packages have changed
type: list
sample: ['package', 'other-package']
'''
import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
for l in data:
p = regex.search(l)
if p:
packages.append(p.group(1))
return packages
def update_package_db(module, exit):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
elif exit:
module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
else:
return True
def query_toplevel(module, name):
# /etc/apk/world contains a list of top-level packages separated by ' ' or \n
# packages may contain repository (@) or version (=<>~) separator characters or start with negation !
regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
with open('/etc/apk/world') as f:
content = f.read().split()
for p in content:
if regex.search(p):
return True
return False
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"^%s: virtual meta package" % (re.escape(name))
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
if available:
cmd = "%s --available" % cmd
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
if re.search(r'^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_toplevel(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install + to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
# Check to see if packages are still present because of dependencies
for name in installed:
if query_package(module, name):
rc = 1
break
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name=dict(type='list'),
repository=dict(type='list'),
update_cache=dict(default='no', type='bool'),
upgrade=dict(default='no', type='bool'),
available=dict(default='no', type='bool'),
),
required_one_of=[['name', 'update_cache', 'upgrade']],
mutually_exclusive=[['name', 'upgrade']],
supports_check_mode=True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# add repositories to the APK_PATH
if p['repository']:
for r in p['repository']:
APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module, not p['name'] and not p['upgrade'])
if p['upgrade']:
upgrade_packages(module, p['available'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
if __name__ == '__main__':
main()
| gpl-3.0 |
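A quick, hedged sanity check of the parse_for_packages helper defined in the apk module above; the transcript is made-up apk output and the function is assumed to be in scope (for example, imported from the module).

sample = (
    "(1/3) Installing musl (1.2.2-r0)\n"
    "(2/3) Installing busybox (1.33.1-r3)\n"
    "(3/3) Upgrading openssl (1.1.1l-r0)\n"
    "OK: 7 MiB in 19 packages"
)
print(parse_for_packages(sample))  # ['musl', 'busybox', 'openssl']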
blaze/dask | dask/base.py | 1 | 37839 | from collections import OrderedDict
from collections.abc import Mapping, Iterator
from contextlib import contextmanager
from functools import partial
from hashlib import md5
from operator import getitem
import inspect
import pickle
import os
import threading
import uuid
from distutils.version import LooseVersion
from tlz import merge, groupby, curry, identity
from tlz.functoolz import Compose
from .compatibility import is_dataclass, dataclass_fields
from .context import thread_state
from .core import flatten, quote, get as simple_get, literal
from .hashing import hash_buffer_hex
from .utils import Dispatch, ensure_dict, apply
from . import config, local, threaded
__all__ = (
"DaskMethodsMixin",
"annotate",
"is_dask_collection",
"compute",
"persist",
"optimize",
"visualize",
"tokenize",
"normalize_token",
)
@contextmanager
def annotate(**annotations):
"""Context Manager for setting HighLevelGraph Layer annotations.
Annotations are metadata or soft constraints associated with
tasks that dask schedulers may choose to respect: They signal intent
without enforcing hard constraints. As such, they are
primarily designed for use with the distributed scheduler.
Almost any object can serve as an annotation, but small Python objects
are preferred, while large objects such as NumPy arrays are discouraged.
Callables supplied as an annotation should take a single *key* argument and
produce the appropriate annotation. Individual task keys in the annotated collection
are supplied to the callable.
Parameters
----------
**annotations : key-value pairs
Examples
--------
All tasks within array A should have priority 100 and be retried 3 times
on failure.
>>> import dask
>>> import dask.array as da
>>> with dask.annotate(priority=100, retries=3):
... A = da.ones((10000, 10000))
Prioritise tasks within Array A on flattened block ID.
>>> nblocks = (10, 10)
>>> with dask.annotate(priority=lambda k: k[1]*nblocks[1] + k[2]):
... A = da.ones((1000, 1000), chunks=(100, 100))
Annotations may be nested.
>>> with dask.annotate(priority=1):
... with dask.annotate(retries=3):
... A = da.ones((1000, 1000))
... B = A + 1
"""
prev_annotations = config.get("annotations", {})
new_annotations = {
**prev_annotations,
**{f"annotations.{k}": v for k, v in annotations.items()},
}
with config.set(new_annotations):
yield
def is_dask_collection(x):
"""Returns ``True`` if ``x`` is a dask collection"""
try:
return x.__dask_graph__() is not None
except (AttributeError, TypeError):
return False
class DaskMethodsMixin(object):
"""A mixin adding standard dask collection methods"""
__slots__ = ()
def visualize(self, filename="mydask", format=None, optimize_graph=False, **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name of the file to write to disk. If the provided `filename`
doesn't include an extension, '.png' will be used by default.
If `filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
return visualize(
self,
filename=filename,
format=format,
optimize_graph=optimize_graph,
**kwargs,
)
def persist(self, **kwargs):
"""Persist this dask collection into memory
This turns a lazy Dask collection into a Dask collection with the same
metadata, but now with the results fully computed or actively computing
in the background.
        The action of this function differs significantly depending on the active
task scheduler. If the task scheduler supports asynchronous computing,
such as is the case of the dask.distributed scheduler, then persist
will return *immediately* and the return value's task graph will
contain Dask Future objects. However if the task scheduler only
supports blocking computation then the call to persist will *block*
and the return value's task graph will contain concrete Python results.
This function is particularly useful when using distributed systems,
because the results will be kept in distributed memory, rather than
returned to the local process as with compute.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
See Also
--------
dask.base.persist
"""
(result,) = persist(self, traverse=False, **kwargs)
return result
def compute(self, **kwargs):
"""Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask array turns into a NumPy array and a Dask dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
See Also
--------
dask.base.compute
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
def __await__(self):
try:
from distributed import wait, futures_of
except ImportError as e:
raise ImportError(
"Using async/await with dask requires the `distributed` package"
) from e
from tornado import gen
@gen.coroutine
def f():
if futures_of(self):
yield wait(self)
raise gen.Return(self)
return f().__await__()
def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs):
"""Compute a graph as if it were of type cls.
Allows for applying the same optimizations and default scheduler."""
schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return schedule(dsk2, keys, **kwargs)
def dont_optimize(dsk, keys, **kwargs):
return dsk
def optimization_function(x):
return getattr(x, "__dask_optimize__", dont_optimize)
def collections_to_dsk(collections, optimize_graph=True, **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
from .highlevelgraph import HighLevelGraph
optimizations = kwargs.pop("optimizations", None) or config.get("optimizations", [])
if optimize_graph:
groups = groupby(optimization_function, collections)
_opt_list = []
for opt, val in groups.items():
dsk, keys = _extract_graph_and_keys(val)
groups[opt] = (dsk, keys)
_opt = opt(dsk, keys, **kwargs)
_opt_list.append(_opt)
for opt in optimizations:
_opt_list = []
group = {}
for k, (dsk, keys) in groups.items():
_opt = opt(dsk, keys, **kwargs)
group[k] = (_opt, keys)
_opt_list.append(_opt)
groups = group
# Merge all graphs
if any(isinstance(graph, HighLevelGraph) for graph in _opt_list):
dsk = HighLevelGraph.merge(*_opt_list)
else:
dsk = merge(*map(ensure_dict, _opt_list))
else:
dsk, _ = _extract_graph_and_keys(collections)
return dsk
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
from .highlevelgraph import HighLevelGraph
graphs, keys = [], []
for v in vals:
graphs.append(v.__dask_graph__())
keys.append(v.__dask_keys__())
if any(isinstance(graph, HighLevelGraph) for graph in graphs):
graph = HighLevelGraph.merge(*graphs)
else:
graph = merge(*map(ensure_dict, graphs))
return graph, keys
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop("traverse", True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr) and not isinstance(expr, type):
tsk = (
apply,
typ,
(),
(
dict,
[
[f.name, _unpack(getattr(expr, f.name))]
for f in dataclass_fields(expr)
],
),
)
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
def optimize(*args, **kwargs):
"""Optimize several dask collections at once.
Returns equivalent dask collections that all share the same merged and
optimized underlying graph. This can be useful if converting multiple
collections to delayed objects, or to manually apply the optimizations at
strategic points.
Note that in most cases you shouldn't need to call this method directly.
Parameters
----------
*args : objects
Any number of objects. If a dask object, its graph is optimized and
merged with all those of all other dask objects before returning an
equivalent dask collection. Non-dask arguments are passed through
unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``optimize``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimizations : list of callables, optional
Additional optimization passes to perform.
**kwargs
Extra keyword arguments to forward to the optimization passes.
Examples
--------
>>> import dask as d
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> a2, b2 = d.optimize(a, b)
>>> a2.compute() == a.compute()
True
>>> b2.compute() == b.compute()
True
"""
collections, repack = unpack_collections(*args, **kwargs)
if not collections:
return args
dsk = collections_to_dsk(collections, **kwargs)
postpersists = []
for a in collections:
r, s = a.__dask_postpersist__()
postpersists.append(r(dsk, *s))
return repack(postpersists)
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
Examples
--------
>>> import dask as d
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> d.compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> d.compute({'a': a, 'b': b, 'c': 1})
({'a': 45, 'b': 4.5, 'c': 1},)
"""
traverse = kwargs.pop("traverse", True)
optimize_graph = kwargs.pop("optimize_graph", True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(
scheduler=kwargs.pop("scheduler", None),
collections=collections,
get=kwargs.pop("get", None),
)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postcomputes = [], []
for x in collections:
keys.append(x.__dask_keys__())
postcomputes.append(x.__dask_postcompute__())
results = schedule(dsk, keys, **kwargs)
return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name of the file to write to disk. If the provided `filename`
doesn't include an extension, '.png' will be used by default.
If `filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color : {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
collapse_outputs : bool, optional
Whether to collapse output boxes, which often have empty labels.
Default is False.
verbose : bool, optional
Whether to label output and input boxes even if the data aren't chunked.
Beware: these labels can get very long. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
from dask.dot import dot_graph
filename = kwargs.pop("filename", "mydask")
optimize_graph = kwargs.pop("optimize_graph", False)
dsks = []
args3 = []
for arg in args:
if isinstance(arg, (list, tuple, set)):
for a in arg:
if isinstance(a, Mapping):
dsks.append(a)
if is_dask_collection(a):
args3.append(a)
else:
if isinstance(arg, Mapping):
dsks.append(arg)
if is_dask_collection(arg):
args3.append(arg)
dsk = dict(collections_to_dsk(args3, optimize_graph=optimize_graph))
for d in dsks:
dsk.update(d)
color = kwargs.get("color")
if color == "order":
from .order import order
import matplotlib.pyplot as plt
o = order(dsk)
try:
cmap = kwargs.pop("cmap")
except KeyError:
cmap = plt.cm.RdBu
if isinstance(cmap, str):
import matplotlib.pyplot as plt
cmap = getattr(plt.cm, cmap)
mx = max(o.values()) + 1
colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()}
kwargs["function_attributes"] = {
k: {"color": v, "label": str(o[k])} for k, v in colors.items()
}
kwargs["data_attributes"] = {k: {"color": v} for k, v in colors.items()}
elif color:
raise NotImplementedError("Unknown value color=%s" % color)
return dot_graph(dsk, filename=filename, **kwargs)
def persist(*args, **kwargs):
"""Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small :class:`numpy.array`
(in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``persist``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
"""
traverse = kwargs.pop("traverse", True)
optimize_graph = kwargs.pop("optimize_graph", True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(
scheduler=kwargs.pop("scheduler", None), collections=collections
)
if inspect.ismethod(schedule):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == schedule:
results = client.persist(
collections, optimize_graph=optimize_graph, **kwargs
)
return repack(results)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postpersists = [], []
for a in collections:
a_keys = list(flatten(a.__dask_keys__()))
rebuild, state = a.__dask_postpersist__()
keys.extend(a_keys)
postpersists.append((rebuild, a_keys, state))
results = schedule(dsk, keys, **kwargs)
d = dict(zip(keys, results))
results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]
return repack(results2)
############
# Tokenize #
############
def tokenize(*args, **kwargs):
"""Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
normalize_token = Dispatch()
normalize_token.register(
(int, float, str, bytes, type(None), type, slice, complex, type(Ellipsis)), identity
)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
def func(seq):
try:
return list(map(normalize_token, seq))
except RecursionError:
return str(uuid.uuid4())
return type(seq).__name__, func(seq)
@normalize_token.register(literal)
def normalize_literal(lit):
return "literal", normalize_token(lit())
@normalize_token.register(range)
def normalize_range(r):
return list(map(normalize_token, [r.start, r.stop, r.step]))
@normalize_token.register(object)
def normalize_object(o):
method = getattr(o, "__dask_tokenize__", None)
if method is not None:
return method()
return normalize_function(o) if callable(o) else uuid.uuid4().hex
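# An illustrative sketch of the ``__dask_tokenize__`` hook handled above; the
# class below is a hypothetical example, not part of the module. An object can
# provide its own deterministic token by defining the method; otherwise
# callables fall back to normalize_function and other objects get a random
# uuid, so they never share a token between calls.
def _normalize_object_example():
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __dask_tokenize__(self):
            return ("Point", self.x, self.y)
    assert tokenize(Point(1, 2)) == tokenize(Point(1, 2))
    assert tokenize(Point(1, 2)) != tokenize(Point(2, 1))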
function_cache = {}
function_cache_lock = threading.Lock()
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
with function_cache_lock:
if len(function_cache) >= 500:
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, Compose):
first = getattr(func, "first", None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, (partial, curry)):
args = tuple(normalize_token(i) for i in func.args)
if func.keywords:
kws = tuple(
(k, normalize_token(v)) for k, v in sorted(func.keywords.items())
)
else:
kws = None
return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b"__main__" not in result: # abort on dynamic functions
return result
except Exception:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except Exception:
return str(func)
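# An illustrative sketch of the ``partial`` handling above; the helper is only
# an example and is never called by the module. Two equivalent partial objects
# normalize to the same value, so they tokenize identically even though they
# are distinct objects.
def _normalize_partial_example():
    import operator
    p1 = partial(operator.add, 2)
    p2 = partial(operator.add, 2)
    assert normalize_function(p1) == normalize_function(p2)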
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
# Intentionally not importing PANDAS_GT_0240 from dask.dataframe._compat
# to avoid ImportErrors from extra dependencies
PANDAS_GT_0240 = LooseVersion(pd.__version__) >= LooseVersion("0.24.0")
@normalize_token.register(pd.Index)
def normalize_index(ind):
if PANDAS_GT_0240:
values = ind.array
else:
values = ind.values
return [ind.name, normalize_token(values)]
@normalize_token.register(pd.MultiIndex)
def normalize_index(ind):
codes = ind.codes if PANDAS_GT_0240 else ind.levels
return (
[ind.name]
+ [normalize_token(x) for x in ind.levels]
+ [normalize_token(x) for x in codes]
)
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes), normalize_token(cat.dtype)]
if PANDAS_GT_0240:
@normalize_token.register(pd.arrays.PeriodArray)
@normalize_token.register(pd.arrays.DatetimeArray)
@normalize_token.register(pd.arrays.TimedeltaArray)
def normalize_period_array(arr):
return [normalize_token(arr.asi8), normalize_token(arr.dtype)]
@normalize_token.register(pd.arrays.IntervalArray)
def normalize_interval_array(arr):
return [
normalize_token(arr.left),
normalize_token(arr.right),
normalize_token(arr.closed),
]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [
s.name,
s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index),
]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data.extend([df.columns, df.index])
return list(map(normalize_token, data))
@normalize_token.register(pd.api.extensions.ExtensionArray)
def normalize_extension_array(arr):
import numpy as np
return normalize_token(np.asarray(arr))
# Dtypes
@normalize_token.register(pd.api.types.CategoricalDtype)
def normalize_categorical_dtype(dtype):
return [normalize_token(dtype.categories), normalize_token(dtype.ordered)]
@normalize_token.register(pd.api.extensions.ExtensionDtype)
def normalize_period_dtype(dtype):
return normalize_token(dtype.name)
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (x.item(), x.dtype)
if hasattr(x, "mode") and getattr(x, "filename", None):
if hasattr(x.base, "ctypes"):
offset = (
x.ctypes.get_as_parameter().value
- x.base.ctypes.get_as_parameter().value
)
else:
offset = 0  # root memmaps have the mmap object as base
# add the offset numpy used when opening the file (not the offset to the
# beginning of the file)
if hasattr(x, "offset"):
offset += getattr(x, "offset")
return (
x.filename,
os.path.getmtime(x.filename),
x.dtype,
x.shape,
x.strides,
offset,
)
if x.dtype.hasobject:
try:
try:
# string fast-path
data = hash_buffer_hex(
"-".join(x.flat).encode(
encoding="utf-8", errors="surrogatepass"
)
)
except UnicodeDecodeError:
# bytes fast-path
data = hash_buffer_hex(b"-".join(x.flat))
except (TypeError, UnicodeDecodeError):
try:
data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
except Exception:
# pickling not supported, use UUID4-based fallback
data = uuid.uuid4().hex
else:
try:
data = hash_buffer_hex(x.ravel(order="K").view("i1"))
except (BufferError, AttributeError, ValueError):
data = hash_buffer_hex(x.copy().ravel(order="K").view("i1"))
return (data, x.dtype, x.shape, x.strides)
@normalize_token.register(np.matrix)
def normalize_matrix(x):
return type(x).__name__, normalize_array(x.view(type=np.ndarray))
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return "np." + name
except AttributeError:
return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
import scipy.sparse as sp
def normalize_sparse_matrix(x, attrs):
return (
type(x).__name__,
normalize_seq((normalize_token(getattr(x, key)) for key in attrs)),
)
for cls, attrs in [
(sp.dia_matrix, ("data", "offsets", "shape")),
(sp.bsr_matrix, ("data", "indices", "indptr", "blocksize", "shape")),
(sp.coo_matrix, ("data", "row", "col", "shape")),
(sp.csr_matrix, ("data", "indices", "indptr", "shape")),
(sp.csc_matrix, ("data", "indices", "indptr", "shape")),
(sp.lil_matrix, ("data", "rows", "shape")),
]:
normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs))
@normalize_token.register(sp.dok_matrix)
def normalize_dok_matrix(x):
return type(x).__name__, normalize_token(sorted(x.items()))
def _colorize(t):
"""Convert (r, g, b) triple to "#RRGGBB" string
For use with ``visualize(color=...)``
Examples
--------
>>> _colorize((255, 255, 255))
'#FFFFFF'
>>> _colorize((0, 32, 128))
'#002080'
"""
t = t[:3]
i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))
h = hex(int(i))[2:].upper()
h = "0" * (6 - len(h)) + h
return "#" + h
named_schedulers = {
"sync": local.get_sync,
"synchronous": local.get_sync,
"single-threaded": local.get_sync,
"threads": threaded.get,
"threading": threaded.get,
}
try:
from dask import multiprocessing as dask_multiprocessing
except ImportError:
pass
else:
named_schedulers.update(
{
"processes": dask_multiprocessing.get,
"multiprocessing": dask_multiprocessing.get,
}
)
get_err_msg = """
The get= keyword has been removed.
Please use the scheduler= keyword instead with the name of
the desired scheduler like 'threads' or 'processes'
x.compute(scheduler='single-threaded')
x.compute(scheduler='threads')
x.compute(scheduler='processes')
or with a function that takes the graph and keys
x.compute(scheduler=my_scheduler_function)
or with a Dask client
x.compute(scheduler=client)
""".strip()
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
"""Get scheduler function
There are various ways to specify the scheduler to use:
1. Passing in scheduler= parameters
2. Passing these into global configuration
3. Using defaults of a dask collection
This function centralizes the logic to determine the right scheduler to use
from those many options
"""
if get:
raise TypeError(get_err_msg)
if scheduler is not None:
if callable(scheduler):
return scheduler
elif "Client" in type(scheduler).__name__ and hasattr(scheduler, "get"):
return scheduler.get
elif scheduler.lower() in named_schedulers:
return named_schedulers[scheduler.lower()]
elif scheduler.lower() in ("dask.distributed", "distributed"):
from distributed.worker import get_client
return get_client().get
else:
raise ValueError(
"Expected one of [distributed, %s]"
% ", ".join(sorted(named_schedulers))
)
# else: # try to connect to remote scheduler with this name
# return get_client(scheduler).get
if config.get("scheduler", None):
return get_scheduler(scheduler=config.get("scheduler", None))
if config.get("get", None):
raise ValueError(get_err_msg)
if getattr(thread_state, "key", False):
from distributed.worker import get_worker
return get_worker().client.get
if cls is not None:
return cls.__dask_scheduler__
if collections:
collections = [c for c in collections if c is not None]
if collections:
get = collections[0].__dask_scheduler__
if not all(c.__dask_scheduler__ == get for c in collections):
raise ValueError(
"Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler=` parameter explicitly in compute or "
"globally with `dask.config.set`."
)
return get
return None
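# An illustrative sketch of the resolution order above; this helper is only an
# example and is never called by the module. String names resolve through
# ``named_schedulers`` while a callable is returned unchanged.
def _get_scheduler_example():
    assert get_scheduler(scheduler="threads") is threaded.get
    assert get_scheduler(scheduler="sync") is local.get_sync
    def my_scheduler(dsk, keys, **kwargs):
        return None
    assert get_scheduler(scheduler=my_scheduler) is my_scheduler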
def wait(x, timeout=None, return_when="ALL_COMPLETED"):
"""Wait until computation has finished
This is a compatibility alias for ``dask.distributed.wait``.
If it is applied to Dask collections without Dask Futures, or if Dask
distributed is not installed, then it is a no-op.
"""
try:
from distributed import wait
return wait(x, timeout=timeout, return_when=return_when)
except (ImportError, ValueError):
return x
| bsd-3-clause |
sjohannes/exaile | xl/player/__init__.py | 3 | 1566 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
"""
Allows for playback and queue control
"""
__all__ = ['adapters', 'gst', 'queue', 'PLAYER', 'QUEUE']
import os
from xl import xdg
from . import player
from . import queue
PLAYER = player.ExailePlayer('player')
QUEUE = queue.PlayQueue(
PLAYER, 'queue', location=os.path.join(xdg.get_data_dir(), 'queue.state')
)
| gpl-2.0 |
SDSG-Invenio/invenio | invenio/modules/messages/testsuite/test_messages.py | 15 | 5465 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebMessage."""
__revision__ = \
"$Id$"
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
webmessage_mailutils = lazy_import('invenio.utils.mail')
class TestQuotingMessage(InvenioTestCase):
"""Test for quoting messages."""
def test_simple_quoting_per_block(self):
"""webmessage - test quoting simple message (HTML, per block)"""
text = """Dear romeo
I received your mail
>>Would you like to come with me to the restaurant?
Of course!
>>>>When could we get together?
Reply to my question please.
see you..."""
expected_text = """Dear romeo<br/>
I received your mail<br/>
<div class="commentbox">
\tWould you like to come with me to the restaurant?<br/>
</div>
Of course!<br/>
<div class="commentbox">
\t<div class="commentbox">
\t\tWhen could we get together?<br/>
\t</div>
</div>
Reply to my question please.<br/>
see you...<br/>
"""
res = webmessage_mailutils.email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<div class="commentbox">', "</div>"),
linebreak_html='<br/>')
self.assertEqual(res, expected_text)
def test_simple_quoting_per_line(self):
"""webmessage - test quoting simple message (HTML, per line)"""
text = """Dear romeo
I received your mail
>>Would you like to come with me to the restaurant?
>>I discovered a really nice one.
Of course!
>>>>When could we get together?
Reply to my question please.
see you..."""
expected_text = """Dear romeo <br/>
I received your mail <br/>
<blockquote><div>Would you like to come with me to the restaurant? </div></blockquote> <br/>
<blockquote><div>I discovered a really nice one. </div></blockquote> <br/>
Of course! <br/>
<blockquote><div><blockquote><div>When could we get together? </div></blockquote> </div></blockquote> <br/>
Reply to my question please. <br/>
see you... <br/>
"""
res = webmessage_mailutils.email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<blockquote><div>', ' </div></blockquote>'),
linebreak_html=" <br/>",
indent_block=False)
self.assertEqual(res, expected_text)
def test_quoting_message(self):
"""webmessage - test quoting message (text)"""
text = """C'est un lapin, lapin de bois.
>>Quoi?
Un cadeau.
>>What?
A present.
>>Oh, un cadeau"""
expected_text = """>>C'est un lapin, lapin de bois.
>>>>Quoi?
>>Un cadeau.
>>>>What?
>>A present.
>>>>Oh, un cadeau
"""
res = webmessage_mailutils.email_quote_txt(text,
indent_txt='>>',
linebreak_input="\n",
linebreak_output="\n")
self.assertEqual(res, expected_text)
def test_indenting_rule_message(self):
"""webmessage - return email-like indenting rule"""
text = """>>Brave Sir Robin ran away...
<img src="malicious_script"/>*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
"""
expected_text = """>>Brave Sir Robin ran away...
<img src="malicious_script" />*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
"""
res = webmessage_mailutils.escape_email_quoted_text(text,
indent_txt='>>',
linebreak_txt='\n')
self.assertEqual(res, expected_text)
TEST_SUITE = make_test_suite(TestQuotingMessage)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
Mythirion/VirtualRobot | requests/packages/urllib3/fields.py | 1007 | 5833 | import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
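# An illustrative usage sketch of the helper above; the results assume the
# platform ``mimetypes`` registry knows these common extensions, and the
# helper is never called by the module itself.
def _guess_content_type_example():
    assert guess_content_type('report.html') == 'text/html'
    assert guess_content_type('notes.txt') == 'text/plain'
    assert guess_content_type('blob.unknown-ext') == 'application/octet-stream'
    assert guess_content_type(None) == 'application/octet-stream'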
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
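# An illustrative sketch of the quoting above; the helper is only an example
# and is never called by the module. Plain ASCII values keep the simple
# name="value" form, while non-ASCII values are RFC 2231 encoded.
def _format_header_param_example():
    assert format_header_param('filename', 'report.csv') == 'filename="report.csv"'
    encoded = format_header_param('filename', u'r\xe9sum\xe9.txt')
    assert encoded.startswith("filename*=utf-8''")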
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
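# An illustrative sketch (hypothetical values): only parameters that have a
# value are rendered, e.g.
#   field._render_parts((('name', 'upload'), ('filename', None)))
#   # -> 'name="upload"'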
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method sets the "Content-Disposition", "Content-Type" and
"Content-Location" headers on the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
| mit |
ccn-2m/django | django/contrib/localflavor/be/forms.py | 194 | 2910 | """
Belgium-specific Form helpers
"""
from __future__ import absolute_import
from django.contrib.localflavor.be.be_provinces import PROVINCE_CHOICES
from django.contrib.localflavor.be.be_regions import REGION_CHOICES
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
class BEPostalCodeField(RegexField):
"""
A form field that validates its input as a Belgian postal code.
A Belgian postal code is a four-digit string. The first digit indicates
the province (except for the 3ddd numbers, which are shared by the
eastern part of Flemish Brabant and Limburg, and the 1ddd numbers, which
are shared by the Brussels Capital Region, the western part of
Flemish Brabant and Walloon Brabant).
"""
default_error_messages = {
'invalid': _(
'Enter a valid postal code in the range and format 1XXX - 9XXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(BEPostalCodeField, self).__init__(r'^[1-9]\d{3}$',
max_length, min_length, *args, **kwargs)
class BEPhoneNumberField(RegexField):
"""
A form field that validates its input as a Belgian phone number.
Landlines in larger cities have a seven-digit subscriber number and a
one-digit area code, while smaller cities have a six-digit subscriber
number and a two-digit area code. Cell phones have a six-digit subscriber
number and a two-digit area code preceded by the number 4.
0d ddd dd dd, 0d/ddd.dd.dd, 0d.ddd.dd.dd,
0dddddddd - dialling a bigger city
0dd dd dd dd, 0dd/dd.dd.dd, 0dd.dd.dd.dd,
0dddddddd - dialling a smaller city
04dd ddd dd dd, 04dd/ddd.dd.dd,
04dd.ddd.dd.dd, 04ddddddddd - dialling a mobile number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats '
'0x xxx xx xx, 0xx xx xx xx, 04xx xx xx xx, '
'0x/xxx.xx.xx, 0xx/xx.xx.xx, 04xx/xx.xx.xx, '
'0x.xxx.xx.xx, 0xx.xx.xx.xx, 04xx.xx.xx.xx, '
'0xxxxxxxx or 04xxxxxxxx.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(BEPhoneNumberField, self).__init__(r'^[0]\d{1}[/. ]?\d{3}[. ]\d{2}[. ]?\d{2}$|^[0]\d{2}[/. ]?\d{2}[. ]?\d{2}[. ]?\d{2}$|^[0][4]\d{2}[/. ]?\d{2}[. ]?\d{2}[. ]?\d{2}$',
max_length, min_length, *args, **kwargs)
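# An illustrative usage sketch of the two fields above (assumes a configured
# Django settings module; the values shown are hypothetical):
#
#   postal = BEPostalCodeField()
#   postal.clean('1000')          # valid (Brussels)
#   postal.clean('0123')          # raises ValidationError: must start with 1-9
#
#   phone = BEPhoneNumberField()
#   phone.clean('02 123 45 67')   # valid landline format
#   phone.clean('0475 12 34 56')  # valid mobile format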
class BERegionSelect(Select):
"""
A Select widget that uses a list of Belgian regions as its choices.
"""
def __init__(self, attrs=None):
super(BERegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class BEProvinceSelect(Select):
"""
A Select widget that uses a list of Belgian provinces as its choices.
"""
def __init__(self, attrs=None):
super(BEProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
| bsd-3-clause |
alessandro-aglietti/rosdep | src/rosdep2/catkin_support.py | 7 | 4024 | """
Helper routines for catkin. These are distributed inside of rosdep2
to protect catkin against future rosdep2 API updates. These helper
routines are assumed to run in an interactive mode with an end-user
and thus return end-user oriented error messages.
Errors are returned as arguments to raised :exc:`ValidationFailed`
exceptions.
Workflow::
installer = get_installer(APT_INSTALLER)
view = get_catkin_view(rosdistro_name, 'ubuntu', 'lucid')
resolve_for_os(rosdep_key, view, installer, 'ubuntu', 'lucid')
"""
from __future__ import print_function
import os
from subprocess import Popen, PIPE, CalledProcessError
from . import create_default_installer_context
from .lookup import RosdepLookup
from .platforms.debian import APT_INSTALLER
from .platforms.osx import BREW_INSTALLER
from .platforms.pip import PIP_INSTALLER
from .platforms.redhat import YUM_INSTALLER
from .rep3 import download_targets_data
from .rosdistrohelper import get_targets
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import get_sources_list_dir, DataSourceMatcher, SourcesListLoader
class ValidationFailed(Exception):
pass
def call(command, pipe=None):
"""
Copy of call() function from catkin-generate-debian to mimic output
"""
working_dir = '.'
#print('+ cd %s && ' % working_dir + ' '.join(command))
process = Popen(command, stdout=pipe, stderr=pipe, cwd=working_dir)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, command)
if pipe:
return output
def get_ubuntu_targets(rosdistro):
"""
Get a list of Ubuntu distro codenames for the specified ROS
distribution. This method blocks on an HTTP download.
:raises: :exc:`ValidationFailed`
"""
targets_data = get_targets()
legacy_targets = download_targets_data()
if 'fuerte' in legacy_targets:
targets_data['fuerte'] = {'ubuntu': legacy_targets['fuerte']}
if 'electric' in legacy_targets:
targets_data['electric'] = {'ubuntu': legacy_targets['electric']}
return targets_data[rosdistro]['ubuntu']
def get_installer(installer_name):
""" Expected installers APT_INSTALLER, YUM_INSTALLER, ..."""
installer_context = create_default_installer_context()
return installer_context.get_installer(installer_name)
def resolve_for_os(rosdep_key, view, installer, os_name, os_version):
"""
Resolve rosdep key to dependencies.
:param os_name: OS name, e.g. 'ubuntu'
:raises: :exc:`rosdep2.ResolutionError`
"""
d = view.lookup(rosdep_key)
ctx = create_default_installer_context()
os_installers = ctx.get_os_installer_keys(os_name)
default_os_installer = ctx.get_default_os_installer_key(os_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, os_installers, default_os_installer)
assert inst_key in os_installers
return installer.resolve(rule)
def update_rosdep():
call(('rosdep', 'update'), pipe=PIPE)
def get_catkin_view(rosdistro_name, os_name, os_version, update=True):
"""
:raises: :exc:`ValidationFailed`
"""
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
raise ValidationFailed("""rosdep database is not initialized, please run:
\tsudo rosdep init
""")
if update:
update_rosdep()
sources_matcher = DataSourceMatcher([rosdistro_name, os_name, os_version])
sources_loader = SourcesListLoader.create_default(matcher=sources_matcher)
if not (sources_loader.sources):
raise ValidationFailed("""rosdep database does not have any sources.
Please make sure you have a valid configuration in:
\t%s
"""%(sources_list_dir))
# for vestigial reasons, using the rospkg loader, but we're only
# actually using the backend db as resolution is not resource-name based
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader)
return lookup.get_rosdep_view(DEFAULT_VIEW_KEY)
| bsd-3-clause |
mammique/django | django/template/loaders/filesystem.py | 225 | 1851 | """
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = settings.TEMPLATE_DIRS
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def load_template_source(self, template_name, template_dirs=None):
tried = []
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath, 'rb') as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except IOError:
tried.append(filepath)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist(error_msg)
load_template_source.is_usable = True
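# An illustrative usage sketch (the paths are hypothetical): safe_join keeps a
# template name from escaping the configured directories.
#
#   loader = Loader()
#   list(loader.get_template_sources('index.html', ['/srv/templates']))
#   # -> ['/srv/templates/index.html']
#   list(loader.get_template_sources('../etc/passwd', ['/srv/templates']))
#   # -> []  (the joined path falls outside the template dir and is skipped)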
| bsd-3-clause |
AndroidOpenDevelopment/android_external_chromium_org | tools/idl_parser/idl_lexer_test.py | 116 | 2758 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from idl_lexer import IDLLexer
from idl_ppapi_lexer import IDLPPAPILexer
#
# FileToTokens
#
# From a source file generate a list of tokens.
#
def FileToTokens(lexer, filename):
with open(filename, 'rb') as srcfile:
lexer.Tokenize(srcfile.read(), filename)
return lexer.GetTokens()
#
# TextToTokens
#
# From a source file generate a list of tokens.
#
def TextToTokens(lexer, text):
lexer.Tokenize(text)
return lexer.GetTokens()
class WebIDLLexer(unittest.TestCase):
def setUp(self):
self.lexer = IDLLexer()
self.filenames = [
'test_lexer/values.in',
'test_lexer/keywords.in'
]
#
# testRebuildText
#
# From a set of tokens, generate a new source text by joining with a
# single space. The new source is then tokenized and compared against the
# old set.
#
def testRebuildText(self):
for filename in self.filenames:
tokens1 = FileToTokens(self.lexer, filename)
to_text = '\n'.join(['%s' % t.value for t in tokens1])
tokens2 = TextToTokens(self.lexer, to_text)
count1 = len(tokens1)
count2 = len(tokens2)
self.assertEqual(count1, count2)
for i in range(count1):
msg = 'Value %s does not match original %s on line %d of %s.' % (
tokens2[i].value, tokens1[i].value, tokens1[i].lineno, filename)
self.assertEqual(tokens1[i].value, tokens2[i].value, msg)
#
# testExpectedType
#
# From a set of tokens pairs, verify the type field of the second matches
# the value of the first, so that:
# integer 123 float 1.1 ...
# will generate a passing test, when the first token has both the type and
# value of the keyword integer and the second has the type of integer and
# value of 123 and so on.
#
def testExpectedType(self):
for filename in self.filenames:
tokens = FileToTokens(self.lexer, filename)
count = len(tokens)
self.assertTrue(count > 0)
self.assertFalse(count & 1)
index = 0
while index < count:
expect_type = tokens[index].value
actual_type = tokens[index + 1].type
msg = 'Type %s does not match expected %s on line %d of %s.' % (
actual_type, expect_type, tokens[index].lineno, filename)
index += 2
self.assertEqual(expect_type, actual_type, msg)
class PepperIDLLexer(WebIDLLexer):
def setUp(self):
self.lexer = IDLPPAPILexer()
self.filenames = [
'test_lexer/values_ppapi.in',
'test_lexer/keywords_ppapi.in'
]
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
rversteegen/commandergenius | project/jni/python/src/Lib/lib2to3/pgen2/tokenize.py | 52 | 16184 | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
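# A minimal illustrative round-trip sketch of the docstring above; this helper
# is only an example and is never called by the module.
def _untokenize_example():
    from StringIO import StringIO
    source = "x = 1\n"
    toks = list(generate_tokens(StringIO(source).readline))
    # With full 5-tuples the original source is reproduced exactly.
    assert untokenize(toks) == source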
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| lgpl-2.1 |
gioman/QGIS | python/plugins/processing/algs/examplescripts/ProcessingExampleScriptsPlugin.py | 9 | 1441 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.Processing import Processing
class ProcessingExampleScriptsPlugin(object):
def initGui(self):
Processing.addScripts(os.path.join(os.path.dirname(__file__), "scripts"))
def unload(self):
Processing.removeScripts(os.path.join(os.path.dirname(__file__), "scripts"))
| gpl-2.0 |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/test/test_pow.py | 177 | 4593 | import test.support, unittest
class PowTest(unittest.TestCase):
def powtest(self, type):
if type != float:
for i in range(-1000, 1000):
self.assertEqual(pow(type(i), 0), 1)
self.assertEqual(pow(type(i), 1), type(i))
self.assertEqual(pow(type(0), 1), type(0))
self.assertEqual(pow(type(1), 1), type(1))
for i in range(-100, 100):
self.assertEqual(pow(type(i), 3), i*i*i)
pow2 = 1
for i in range(0, 31):
self.assertEqual(pow(2, i), pow2)
if i != 30 : pow2 = pow2*2
for othertype in (int,):
for i in list(range(-10, 0)) + list(range(1, 10)):
ii = type(i)
for j in range(1, 11):
jj = -othertype(j)
pow(ii, jj)
for othertype in int, float:
for i in range(1, 100):
zero = type(0)
exp = -othertype(i/10.0)
if exp == 0:
continue
self.assertRaises(ZeroDivisionError, pow, zero, exp)
il, ih = -20, 20
jl, jh = -5, 5
kl, kh = -10, 10
asseq = self.assertEqual
if type == float:
il = 1
asseq = self.assertAlmostEqual
elif type == int:
jl = 0
elif type == int:
jl, jh = 0, 15
for i in range(il, ih+1):
for j in range(jl, jh+1):
for k in range(kl, kh+1):
if k != 0:
if type == float or j < 0:
self.assertRaises(TypeError, pow, type(i), j, k)
continue
asseq(
pow(type(i),j,k),
pow(type(i),j)% type(k)
)
def test_powint(self):
self.powtest(int)
def test_powlong(self):
self.powtest(int)
def test_powfloat(self):
self.powtest(float)
def test_other(self):
# Other tests-- not very systematic
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
for i in range(-10, 11):
for j in range(0, 6):
for k in range(-7, 11):
if j >= 0 and k != 0:
self.assertEqual(
pow(i,j) % k,
pow(i,j,k)
)
if j >= 0 and k != 0:
self.assertEqual(
pow(int(i),j) % k,
pow(int(i),j,k)
)
def test_bug643260(self):
class TestRpow:
def __rpow__(self, other):
return None
None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.
def test_bug705231(self):
# -1.0 raised to an integer should never blow up. It did if the
# platform pow() was buggy, and Python didn't worm around it.
eq = self.assertEqual
a = -1.0
# The next two tests can still fail if the platform floor()
# function doesn't treat all large inputs as integers
# test_math should also fail if that is happening
eq(pow(a, 1.23e167), 1.0)
eq(pow(a, -1.23e167), 1.0)
for b in range(-10, 11):
eq(pow(a, float(b)), b & 1 and -1.0 or 1.0)
for n in range(0, 100):
fiveto = float(5 ** n)
# For small n, fiveto will be odd. Eventually we run out of
# mantissa bits, though, and thereafter fiveto will be even.
expected = fiveto % 2.0 and -1.0 or 1.0
eq(pow(a, fiveto), expected)
eq(pow(a, -fiveto), expected)
eq(expected, 1.0) # else we didn't push fiveto to evenness
def test_main():
test.support.run_unittest(PowTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
salomanders/NbodyPythonTools | nbdpt/nchiladareader.py | 1 | 4326 | import numpy as np
import glob
import struct
import pdb
import re
class Nchilada(object):
def __init__(self, filename):
self.codedict = {1: 'int8',
2: 'uint8',
3: 'int16',
4: 'uint16',
5: 'int32',
6: 'uint32',
7: 'int64',
8: 'uint64',
9: 'float32',
10: 'float64'}
self.fmt_codedict = {1: 'h',
2: 'H',
3: 'i',
4: 'I',
5: 'l',
6: 'L',
7: 'q',
8: 'Q',
9: 'f',
10: 'd'}
self.codedictlen = {1: 1
}
self.filename = filename
def read_param(self):
try:
paramfilename = [f for f in glob.glob('*.param') if re.match('^(cosmo|h)', f)][0]
except IndexError:
try:
paramfilename = [f for f in glob.glob('../*.param') if re.match('^(cosmo|h)', f)][0]
print 'There is no param file in this directory, trying one up'
except IndexError:
print "Can't find param file"
return
f = open(paramfilename, 'rb')
paramfile = {}
for line in f:
try:
if line[0] != '#':
s = line.split('#')[0].split()
paramfile[s[0]] = "".join(s[2:])
except (IndexError, ValueError):
pass
self.paramfile = paramfile
dKpcUnit = np.float(paramfile['dKpcUnit'])
dMsolUnit = np.float(paramfile['dMsolUnit'])
self.timeunit=np.sqrt((dKpcUnit*3.086e21)**3/
(6.67e-8*dMsolUnit*1.99e33)
)/(3600.*24.*365.24*1e9)
try: hub = np.float(paramfile['dHubble0'])
except KeyError: hub=0.
dunit = np.float(paramfile['dKpcUnit'])
munit = np.float(paramfile['dMsolUnit'])
denunit = munit/dunit**3.
self.velunit = 8.0285*np.sqrt(6.6743e-8*denunit)*dunit
hubunit = 10.*self.velunit/dunit
self.h = hub*hubunit
f.close()
def unpack_header(self, family, file):
f = open(self.filename+'/'+family+'/'+file)
(magic, time, iHighWord, nbodies, ndim, code) = struct.unpack('>idiiii', f.read(28))
if (ndim < 1) or (ndim > 3):
f.seek(0)
(magic, time, iHighWord, nbodies, ndim, code) = struct.unpack('<idiiii', f.read(28))
self.byte_swap = True
f.close()
return(time, nbodies, ndim, code)
def unpack_file(self, family, file):
time, nbodies, ndim, code = self.unpack_header(family, file)
self.time = time
self.nbodies = nbodies
self.ndim = ndim
f = open(self.filename+'/'+family+'/'+file)
f.seek(28)
minvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
maxvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
dtype = np.dtype({'names':('array',),'formats':('>'+self.fmt_codedict[code],)})
ar = np.core.records.fromfile(f, dtype=dtype, shape=self.nbodies)
return ar['array']
def minvalue(self, family, file):
time, nbodies, ndim, code = self.unpack_header(family, file)
self.time = time
self.nbodies = nbodies
self.ndim = ndim
f = open(self.filename+'/'+family+'/'+file)
f.seek(28)
minvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
maxvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
return minvalue
def maxvalue(self, family, file):
time, nbodies, ndim, code = self.unpack_header(family, file)
self.time = time
self.nbodies = nbodies
self.ndim = ndim
f = open(self.filename+'/'+family+'/'+file)
f.seek(28)
minvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
maxvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
return maxvalue
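# An illustrative usage sketch (the snapshot path and field names below are
# hypothetical):
#
#   sim = Nchilada('cosmo25.000128')
#   sim.read_param()                        # reads units from the *.param file
#   mass = sim.unpack_file('gas', 'mass')   # one value per body, big-endian
#   lo = sim.minvalue('gas', 'mass')
#   hi = sim.maxvalue('gas', 'mass')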
| mit |
fanwenl/kindle-image | alien_invasion/setting.py | 1 | 1456 | """ 该文件是游戏的一些设置选项 """
class Settings():
""" 存储游戏的所有设置的类 """
def __init__(self):
""" 初始化游戏的设置 """
self.screen_width = 1920
self.screen_height = 900
self.bg_color = (230, 230, 230)
# Ship settings
self.ship_limit = 3
# Bullet settings
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = (60, 60, 60)
self.bullets_allowed = 5
# Alien settings
self.fleet_drop_speed = 10
# How quickly the game speeds up
self.speedup_scale = 1.1
# How quickly the alien point values increase
self.score_scale = 1.5
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
""" 初始化随游戏进行而改变的设置 """
self.ship_speed_factor = 1.5
self.bullet_speed_factor = 1
self.alien_speed_factor = 1
# A fleet_direction of 1 represents right; -1 represents left
self.fleet_direction = 1
# Scoring
self.alien_points = 50
def increase_speed(self):
""" 提高速度设置 """
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
# Points scored for each alien that is shot down
self.alien_points = int(self.alien_points * self.score_scale) | apache-2.0 |
tima/ansible | lib/ansible/modules/network/nxos/nxos_system.py | 31 | 11749 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_system
extends_documentation_fragment: nxos
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco NXOS devices
description:
- This module provides declarative management of node system attributes
on Cisco NXOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configures the default domain
name suffix to be used when referencing this node by its
FQDN. This argument accepts either a list of domain names or
a list of dicts that configure the domain name and VRF name. See
examples.
domain_lookup:
description:
- Enables or disables the DNS
lookup feature in Cisco NXOS. This argument accepts boolean
values. When enabled, the system will try to resolve hostnames
using DNS and when disabled, hostnames will not be resolved.
domain_search:
description:
- Configures a list of domain
name suffixes to search when performing DNS name resolution.
This argument accepts either a list of domain names or
a list of dicts that configure the domain name and VRF name. See
examples.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers or
a list of hashes that configure the name server and VRF name. See
examples.
system_mtu:
description:
- Specifies the mtu, must be an integer.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
nxos_system:
hostname: nxos01
domain_name: test.example.com
- name: remove configuration
nxos_system:
state: absent
- name: configure name servers
nxos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
- name: configure name servers with VRF support
nxos_system:
name_servers:
- { server: 8.8.8.8, vrf: mgmt }
- { server: 8.8.4.4, vrf: mgmt }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname nxos01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import ComplexList
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
return vrf in _CONFIGURED_VRFS
config = get_config(module)
_CONFIGURED_VRFS = re.findall(r'vrf context (\S+)', config)
return vrf in _CONFIGURED_VRFS
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
def needs_update(x):
return want.get(x) and (want.get(x) != have.get(x))
def difference(x, y, z):
return [item for item in x[z] if item not in y[z]]
def remove(cmd, commands, vrf=None):
if vrf:
commands.append('vrf context %s' % vrf)
commands.append(cmd)
if vrf:
commands.append('exit')
def add(cmd, commands, vrf=None):
if vrf:
if not has_vrf(module, vrf):
module.fail_json(msg='invalid vrf name %s' % vrf)
return remove(cmd, commands, vrf)
if state == 'absent':
if have['hostname']:
commands.append('no hostname')
for item in have['domain_name']:
cmd = 'no ip domain-name %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in have['domain_search']:
cmd = 'no ip domain-list %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in have['name_servers']:
cmd = 'no ip name-server %s' % item['server']
remove(cmd, commands, item['vrf'])
if have['system_mtu']:
commands.append('no system jumbomtu')
if state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_lookup'):
cmd = 'ip domain-lookup'
if want['domain_lookup'] is False:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['domain_name']:
for item in difference(have, want, 'domain_name'):
cmd = 'no ip domain-name %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'domain_name'):
cmd = 'ip domain-name %s' % item['name']
add(cmd, commands, item['vrf'])
if want['domain_search']:
for item in difference(have, want, 'domain_search'):
cmd = 'no ip domain-list %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'domain_search'):
cmd = 'ip domain-list %s' % item['name']
add(cmd, commands, item['vrf'])
if want['name_servers']:
for item in difference(have, want, 'name_servers'):
cmd = 'no ip name-server %s' % item['server']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'name_servers'):
cmd = 'ip name-server %s' % item['server']
add(cmd, commands, item['vrf'])
if needs_update('system_mtu'):
commands.append('system jumbomtu %s' % want['system_mtu'])
return commands
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
if match:
return match.group(1)
def parse_domain_name(config, vrf_config):
objects = list()
regex = re.compile(r'ip domain-name (\S+)')
    # a compiled pattern's search() takes a start position, not flags, as its
    # second argument, so re.M is not passed here (the pattern is unanchored
    # and does not need it)
    match = regex.search(config)
if match:
objects.append({'name': match.group(1), 'vrf': None})
for vrf, cfg in iteritems(vrf_config):
        match = regex.search(cfg)
if match:
objects.append({'name': match.group(1), 'vrf': vrf})
return objects
def parse_domain_search(config, vrf_config):
objects = list()
for item in re.findall(r'^ip domain-list (\S+)', config, re.M):
objects.append({'name': item, 'vrf': None})
for vrf, cfg in iteritems(vrf_config):
for item in re.findall(r'ip domain-list (\S+)', cfg, re.M):
objects.append({'name': item, 'vrf': vrf})
return objects
def parse_name_servers(config, vrf_config, vrfs):
objects = list()
match = re.search('^ip name-server (.+)$', config, re.M)
if match:
for addr in match.group(1).split(' '):
if addr == 'use-vrf' or addr in vrfs:
continue
objects.append({'server': addr, 'vrf': None})
for vrf, cfg in iteritems(vrf_config):
vrf_match = re.search('ip name-server (.+)', cfg, re.M)
if vrf_match:
for addr in vrf_match.group(1).split(' '):
objects.append({'server': addr, 'vrf': vrf})
return objects
def parse_system_mtu(config):
match = re.search(r'^system jumbomtu (\d+)', config, re.M)
if match:
return int(match.group(1))
def map_config_to_obj(module):
config = get_config(module)
configobj = NetworkConfig(indent=2, contents=config)
vrf_config = {}
vrfs = re.findall(r'^vrf context (\S+)$', config, re.M)
for vrf in vrfs:
config_data = configobj.get_block_config(path=['vrf context %s' % vrf])
vrf_config[vrf] = config_data
return {
'hostname': parse_hostname(config),
'domain_lookup': 'no ip domain-lookup' not in config,
'domain_name': parse_domain_name(config, vrf_config),
'domain_search': parse_domain_search(config, vrf_config),
'name_servers': parse_name_servers(config, vrf_config, vrfs),
'system_mtu': parse_system_mtu(config)
}
def validate_system_mtu(value, module):
if not 1500 <= value <= 9216:
module.fail_json(msg='system_mtu must be between 1500 and 9216')
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
'domain_lookup': module.params['domain_lookup'],
'system_mtu': module.params['system_mtu']
}
domain_name = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
domain_search = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
name_servers = ComplexList(dict(
server=dict(key=True),
vrf=dict()
), module)
for arg, cast in [('domain_name', domain_name), ('domain_search', domain_search),
('name_servers', name_servers)]:
if module.params[arg] is not None:
obj[arg] = cast(module.params[arg])
else:
obj[arg] = None
return obj
def main():
""" main entry point for module execution
"""
argument_spec = dict(
hostname=dict(),
domain_lookup=dict(type='bool'),
# { name: <str>, vrf: <str> }
domain_name=dict(type='list'),
        # { name: <str>, vrf: <str> }
        domain_search=dict(type='list'),
        # { server: <str>, vrf: <str> }
name_servers=dict(type='list'),
system_mtu=dict(type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
40223245/2015cdb_g6-team1 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_program.py | 738 | 10833 | import io
import os
import sys
import unittest
class Test_TestProgram(unittest.TestCase):
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
# Horrible white box test
def testNoExit(self):
result = object()
test = object()
class FakeRunner(object):
def run(self, test):
self.test = test
return result
runner = FakeRunner()
oldParseArgs = unittest.TestProgram.parseArgs
def restoreParseArgs():
unittest.TestProgram.parseArgs = oldParseArgs
unittest.TestProgram.parseArgs = lambda *args: None
self.addCleanup(restoreParseArgs)
def removeTest():
del unittest.TestProgram.test
unittest.TestProgram.test = test
self.addCleanup(removeTest)
program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2)
self.assertEqual(program.result, result)
self.assertEqual(runner.test, test)
self.assertEqual(program.verbosity, 2)
class FooBar(unittest.TestCase):
def testPass(self):
assert True
def testFail(self):
assert False
class FooBarLoader(unittest.TestLoader):
"""Test loader that returns a suite containing FooBar."""
def loadTestsFromModule(self, module):
return self.suiteClass(
[self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
def test_NonExit(self):
program = unittest.main(exit=False,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=io.StringIO()),
testLoader=self.FooBarLoader())
self.assertTrue(hasattr(program, 'result'))
def test_Exit(self):
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=io.StringIO()),
exit=True,
testLoader=self.FooBarLoader())
def test_ExitAsDefault(self):
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=io.StringIO()),
testLoader=self.FooBarLoader())
class InitialisableProgram(unittest.TestProgram):
exit = False
result = None
verbosity = 1
defaultTest = None
testRunner = None
testLoader = unittest.defaultTestLoader
module = '__main__'
progName = 'test'
test = 'test'
def __init__(self, *args):
pass
RESULT = object()
class FakeRunner(object):
initArgs = None
test = None
raiseError = False
def __init__(self, **kwargs):
FakeRunner.initArgs = kwargs
if FakeRunner.raiseError:
FakeRunner.raiseError = False
raise TypeError
def run(self, test):
FakeRunner.test = test
return RESULT
class TestCommandLineArgs(unittest.TestCase):
def setUp(self):
self.program = InitialisableProgram()
self.program.createTests = lambda: None
FakeRunner.initArgs = None
FakeRunner.test = None
FakeRunner.raiseError = False
def testVerbosity(self):
program = self.program
for opt in '-q', '--quiet':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 0)
for opt in '-v', '--verbose':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 2)
def testBufferCatchFailfast(self):
program = self.program
for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
('catch', 'catchbreak')):
if attr == 'catch' and not hasInstallHandler:
continue
short_opt = '-%s' % arg[0]
long_opt = '--%s' % arg
for opt in short_opt, long_opt:
setattr(program, attr, None)
program.parseArgs([None, opt])
self.assertTrue(getattr(program, attr))
for opt in short_opt, long_opt:
not_none = object()
setattr(program, attr, not_none)
program.parseArgs([None, opt])
self.assertEqual(getattr(program, attr), not_none)
def testWarning(self):
"""Test the warnings argument"""
# see #10535
class FakeTP(unittest.TestProgram):
def parseArgs(self, *args, **kw): pass
def runTests(self, *args, **kw): pass
warnoptions = sys.warnoptions[:]
try:
sys.warnoptions[:] = []
# no warn options, no arg -> default
self.assertEqual(FakeTP().warnings, 'default')
# no warn options, w/ arg -> arg value
self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
sys.warnoptions[:] = ['somevalue']
# warn options, no arg -> None
# warn options, w/ arg -> arg value
self.assertEqual(FakeTP().warnings, None)
self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
finally:
sys.warnoptions[:] = warnoptions
def testRunTestsRunnerClass(self):
program = self.program
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.warnings = 'warnings'
program.runTests()
self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
'failfast': 'failfast',
'buffer': 'buffer',
'warnings': 'warnings'})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsRunnerInstance(self):
program = self.program
program.testRunner = FakeRunner()
FakeRunner.initArgs = None
program.runTests()
# A new FakeRunner should not have been instantiated
self.assertIsNone(FakeRunner.initArgs)
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsOldRunnerClass(self):
program = self.program
FakeRunner.raiseError = True
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.test = 'test'
program.runTests()
# If initialising raises a type error it should be retried
# without the new keyword arguments
self.assertEqual(FakeRunner.initArgs, {})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testCatchBreakInstallsHandler(self):
module = sys.modules['unittest.main']
original = module.installHandler
def restore():
module.installHandler = original
self.addCleanup(restore)
self.installed = False
def fakeInstallHandler():
self.installed = True
module.installHandler = fakeInstallHandler
program = self.program
program.catchbreak = True
program.testRunner = FakeRunner
program.runTests()
self.assertTrue(self.installed)
def _patch_isfile(self, names, exists=True):
def isfile(path):
return path in names
original = os.path.isfile
os.path.isfile = isfile
def restore():
os.path.isfile = original
self.addCleanup(restore)
def testParseArgsFileNames(self):
# running tests with filenames instead of module names
program = self.program
argv = ['progname', 'foo.py', 'bar.Py', 'baz.PY', 'wing.txt']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
# note that 'wing.txt' is not a Python file so the name should
# *not* be converted to a module name
expected = ['foo', 'bar', 'baz', 'wing.txt']
self.assertEqual(program.testNames, expected)
def testParseArgsFilePaths(self):
program = self.program
argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
expected = ['foo.bar.baz', 'green.red']
self.assertEqual(program.testNames, expected)
def testParseArgsNonExistentFiles(self):
program = self.program
argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
self._patch_isfile([])
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNames, argv[1:])
def testParseArgsAbsolutePathsThatCanBeConverted(self):
cur_dir = os.getcwd()
program = self.program
def _join(name):
return os.path.join(cur_dir, name)
argv = ['progname', _join('foo/bar/baz.py'), _join('green\\red.py')]
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
expected = ['foo.bar.baz', 'green.red']
self.assertEqual(program.testNames, expected)
def testParseArgsAbsolutePathsThatCannotBeConverted(self):
program = self.program
# even on Windows '/...' is considered absolute by os.path.abspath
argv = ['progname', '/foo/bar/baz.py', '/green/red.py']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNames, argv[1:])
# it may be better to use platform-specific functions to normalise paths
# rather than accepting '.PY' and '\' as file separators on Linux / Mac
# it would also be better to check that a filename is a valid module
# identifier (we have a regex for this in loader.py)
# for invalid filenames should we raise a useful error rather than
# leaving the current error message (import of filename fails) in place?
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
henridwyer/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed-form formula proposed by Ledoit and Wolf to compute
  the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
rue89-tech/edx-analytics-pipeline | edx/analytics/tasks/util/tests/test_opaque_key_util.py | 3 | 5866 | """
Tests for utilities that work with opaque course keys.
"""
from opaque_keys.edx.locator import CourseLocator
import edx.analytics.tasks.util.opaque_key_util as opaque_key_util
from edx.analytics.tasks.tests import unittest
VALID_COURSE_ID = unicode(CourseLocator(org='org', course='course_id', run='course_run'))
VALID_LEGACY_COURSE_ID = "org/course_id/course_run"
INVALID_LEGACY_COURSE_ID = "org:course_id:course_run"
INVALID_NONASCII_LEGACY_COURSE_ID = u"org/course\ufffd_id/course_run"
VALID_NONASCII_LEGACY_COURSE_ID = u"org/cours\u00e9_id/course_run"
class CourseIdTest(unittest.TestCase):
"""
Verify that course_id filtering works correctly.
"""
def test_normal_opaque_course_id(self):
self.assertTrue(opaque_key_util.is_valid_course_id(VALID_COURSE_ID))
def test_normal_legacy_course_id(self):
self.assertTrue(opaque_key_util.is_valid_course_id(VALID_LEGACY_COURSE_ID))
def test_legacy_course_id_without_components(self):
self.assertFalse(opaque_key_util.is_valid_course_id(INVALID_LEGACY_COURSE_ID))
def test_course_id_with_valid_nonascii(self):
self.assertTrue(opaque_key_util.is_valid_course_id(VALID_NONASCII_LEGACY_COURSE_ID))
def test_course_id_with_invalid_nonascii(self):
self.assertFalse(opaque_key_util.is_valid_course_id(INVALID_NONASCII_LEGACY_COURSE_ID))
def test_no_course_id(self):
self.assertFalse(opaque_key_util.is_valid_course_id(None))
def test_valid_org_id(self):
self.assertTrue(opaque_key_util.is_valid_org_id(u'org_id\u00e9'))
def test_invalid_org_id(self):
self.assertFalse(opaque_key_util.is_valid_org_id(u'org\ufffd_id'))
def test_no_org_id(self):
self.assertFalse(opaque_key_util.is_valid_org_id(None))
def test_get_valid_org_id(self):
self.assertEquals(opaque_key_util.get_org_id_for_course(VALID_COURSE_ID), "org")
def test_get_valid_legacy_org_id(self):
self.assertEquals(opaque_key_util.get_org_id_for_course(VALID_LEGACY_COURSE_ID), "org")
self.assertEquals(opaque_key_util.get_org_id_for_course(VALID_NONASCII_LEGACY_COURSE_ID), "org")
def test_get_invalid_legacy_org_id(self):
self.assertIsNone(opaque_key_util.get_org_id_for_course(INVALID_LEGACY_COURSE_ID))
self.assertIsNone(opaque_key_util.get_org_id_for_course(INVALID_NONASCII_LEGACY_COURSE_ID))
def test_get_filename(self):
self.assertEquals(opaque_key_util.get_filename_safe_course_id(VALID_COURSE_ID), "org_course_id_course_run")
self.assertEquals(opaque_key_util.get_filename_safe_course_id(VALID_COURSE_ID, '-'), "org-course_id-course_run")
def test_get_filename_with_colon(self):
course_id = unicode(CourseLocator(org='org', course='course:id', run='course:run'))
self.assertEquals(opaque_key_util.get_filename_safe_course_id(VALID_COURSE_ID), "org_course_id_course_run")
self.assertEquals(opaque_key_util.get_filename_safe_course_id(course_id, '-'), "org-course-id-course-run")
def test_get_filename_for_legacy_id(self):
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_LEGACY_COURSE_ID),
"org_course_id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_LEGACY_COURSE_ID, '-'),
"org-course_id-course_run"
)
def test_get_filename_for_invalid_id(self):
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_LEGACY_COURSE_ID),
"org_course_id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_LEGACY_COURSE_ID, '-'),
"org-course_id-course_run"
)
def test_get_filename_for_nonascii_id(self):
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_NONASCII_LEGACY_COURSE_ID),
u"org_cours__id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_NONASCII_LEGACY_COURSE_ID, '-'),
u"org-cours-_id-course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_NONASCII_LEGACY_COURSE_ID),
u"org_course__id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_NONASCII_LEGACY_COURSE_ID, '-'),
u"org-course-_id-course_run"
)
def test_get_course_key_from_url(self):
url = "https://courses.edx.org/courses/{course_id}/stuff".format(course_id=VALID_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertEquals(unicode(course_key), VALID_COURSE_ID)
def test_get_course_key_from_legacy_url(self):
url = "https://courses.edx.org/courses/{course_id}/stuff".format(course_id=VALID_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertEquals(unicode(course_key), VALID_LEGACY_COURSE_ID)
def test_get_course_key_from_invalid_url(self):
url = "https://courses.edx.org/courses/{course_id}/stuff".format(course_id=INVALID_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertIsNone(course_key)
def test_get_course_key_from_nonascii_url(self):
url = u"https://courses.edx.org/courses/{course_id}/stuff".format(course_id=VALID_NONASCII_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertEquals(unicode(course_key), VALID_NONASCII_LEGACY_COURSE_ID)
url = u"https://courses.edx.org/courses/{course_id}/stuff".format(course_id=INVALID_NONASCII_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertIsNone(course_key)
| agpl-3.0 |
poppu-mtg/StackIt | StackIt/GUIapp.py | 1 | 4165 | import os, shutil, sys, time
from . import globals, builder
if sys.version_info.major == 3:
from tkinter import *
else:
from Tkinter import *
# from tkFileDialog import *
from PIL import Image, ImageTk
class ScrollIt():
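    # Descriptive note (added for clarity): __init__ below builds a canvas
    # holding two copies of the generated banner side by side, and next_image()
    # shifts the image 3 px to the left every 60 ms, snapping back near the
    # start so the scroll appears continuous once triggered by a click.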
def __init__(self):
self.image1 = Image.open(mGui.btn2text.get()[9:] + '-scroll.png')
w1, h1 = self.image1.size
self.imagefull = Image.new("RGB", (w1 * 2, h1), "black")
self.imagefull.paste(self.image1, (0, 0))
self.imagefull.paste(self.image1, (w1, 0))
self.photo1 = ImageTk.PhotoImage(self.imagefull)
width1 = self.photo1.width()
height1 = self.photo1.height()
novi1 = Toplevel()
self.canvas1 = Canvas(novi1, width=1980, height=34)
self.canvas1.pack(expand=1, fill=BOTH) # <--- Make your canvas expandable.
x = (width1)/2.0
y = (height1)/2.0
self.item = self.canvas1.create_image(x, y, image=self.photo1) # <--- Save the return value of the create_* method.
self.x00, self.y00 = self.canvas1.coords(self.item)
self.canvas1.bind('<Button-1>', self.next_image)
def next_image(self, even=None):
x0, y0 = self.canvas1.coords(self.item)
if x0 < 3:
self.canvas1.coords(self.item, (self.x00, y0))
else:
self.canvas1.move(self.item, -3, 0)
self.canvas1.after(60, self.next_image)
def OpenPro1():
if mGui.Listname.get() != '':
deckname = mGui.Listname.get()
elif len(mGui.Listentry.get("1.0", "end-1c")) != 0:
deckname = 'sample.txt'
if os.path.isfile(deckname):
os.remove(deckname)
decktext = mGui.Listentry.get("1.0", 'end-1c')
with open(deckname, "a") as outf:
outf.write(decktext + '\n')
builder.main(deckname)
if deckname == 'sample.txt':
if os.path.exists(os.path.join(globals.CACHE_PATH, deckname)):
os.remove(os.path.join(globals.CACHE_PATH, deckname))
shutil.move(deckname, os.path.join(globals.CACHE_PATH, deckname))
novi = Toplevel()
canvas = Canvas(novi, width = 350, height = 1000)
canvas.pack(expand = YES, fill = BOTH)
#gif1 = PhotoImage(file = 'image.gif')
gif1=ImageTk.PhotoImage(Image.open(deckname[:-4] + '.png'))
canvas.create_image(50, 10, image = gif1, anchor = NW)
#assigned the gif1 to the canvas object
canvas.gif1 = gif1
mGui.btn2text.set('BannerIt ' + deckname[:-4])
mGui.Button_2.config(state='active')
def OpenPro2():
ScrollIt()
mGui = Tk()
mGui.configure(background='white')
mGui.title(' StackIt')
mGui.geometry("350x565")
tkimage = ImageTk.PhotoImage(Image.open(os.path.join(globals.RESOURCES_PATH, 'StackIt-Logo.png')).resize((345, 87)))
mGui.Logo = Label(mGui, image=tkimage)
mGui.Logo.grid(row=0, column=0, columnspan=3)
mGui.Label1 = Label(mGui, text=' Decklist:')
mGui.Label1.grid(row=1, column=0)
mGui.Listname = Entry(mGui)
mGui.Listname.grid(row=1, column=1)
mGui.Button_1 = Button(mGui, text="Generate", command=OpenPro1)
mGui.Button_1.grid(row=1, column=2)
#mGui.Listentry=Entry(mGui)
#mGui.Listentry.grid(row=2, column=0, columnspan=3)
mGui.Label2 = Label(mGui, text=' Paste board:')
mGui.Label2.grid(row=2, column=0, columnspan=3)
mGui.Listentry=Text(mGui, height=25, width=40, relief=GROOVE, undo=True, xscrollcommand=True, yscrollcommand=True, bd=2)
mGui.Listentry.grid(row=3, column=0, columnspan=3)
mGui.btn2text = StringVar()
mGui.btn2text.set('BannerIt ')
mGui.Button_2 = Button(mGui, textvariable=mGui.btn2text, state='disabled', command=OpenPro2)
mGui.Button_2.grid(row=4, column=0, columnspan=3)
def main():
if len(sys.argv) > 1 and sys.argv[1] == "--automatedtest":
def draw():
mGui.update_idletasks()
mGui.update()
draw()
mGui.Listentry.insert(END, "60 Island\n4 Urza's Tower\n200 Shadowborn Apostle")
draw()
OpenPro1()
draw()
mGui.Listname.insert(END, "testdecks/StressTest1.dec")
draw()
OpenPro1()
draw()
time.sleep(1)
else:
mGui.mainloop()
if __name__ == "__main__":
main()
| mit |
ofer43211/unisubs | apps/teams/migrations/0102_auto__add_billingreport.py | 5 | 30439 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BillingReport'
db.create_table('teams_billingreport', (
('end_date', self.gf('django.db.models.fields.DateField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('processed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['teams.Team'])),
('start_date', self.gf('django.db.models.fields.DateField')()),
('csv_data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('teams', ['BillingReport'])
def backwards(self, orm):
# Deleting model 'BillingReport'
db.delete_table('teams_billingreport')
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.billingreport': {
'Meta': {'object_name': 'BillingReport'},
'csv_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"})
},
'teams.invite': {
'Meta': {'object_name': 'Invite'},
'approved': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.membershipnarrowing': {
'Meta': {'object_name': 'MembershipNarrowing'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'narrowing_includer'", 'null': 'True', 'to': "orm['teams.TeamMember']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'narrowings'", 'to': "orm['teams.TeamMember']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.setting': {
'Meta': {'unique_together': "(('key', 'team'),)", 'object_name': 'Setting'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'approved': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'review_base_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tasks_based_on'", 'null': 'True', 'to': "orm['videos.SubtitleVersion']"}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tseams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teamlanguagepreference': {
'Meta': {'unique_together': "(('team', 'language_code'),)", 'object_name': 'TeamLanguagePreference'},
'allow_reads': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_writes': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'preferred': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lang_preferences'", 'to': "orm['teams.Team']"})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamnotificationsetting': {
'Meta': {'object_name': 'TeamNotificationSetting'},
'basic_auth_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basic_auth_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'request_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'notification_settings'", 'unique': 'True', 'to': "orm['teams.Team']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'teams.workflow': {
'Meta': {'unique_together': "(('team', 'project', 'team_video'),)", 'object_name': 'Workflow'},
'approve_allowed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'autocreate_subtitle': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'autocreate_translate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'}),
'review_allowed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['teams']
| agpl-3.0 |
BeegorMif/HTPC-Manager | lib/sqlalchemy/event/api.py | 75 | 3844 | # event/api.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .. import util, exc
from .base import _registrars
from .registry import _EventKey
CANCEL = util.symbol('CANCEL')
NO_RETVAL = util.symbol('NO_RETVAL')
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError("No such event '%s' for target '%s'" %
(identifier, target))
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. versionadded:: 0.9.3 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. versionadded:: 0.9.3 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` function
will revert all of these operations.
.. versionadded:: 0.9.0
"""
_event_key(target, identifier, fn).remove()
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen.
.. versionadded:: 0.9.0
"""
return _event_key(target, identifier, fn).contains()
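# Illustrative round trip through the public API above (a sketch added for
# clarity, not part of the SQLAlchemy source). `SomeMappedClass` and the
# listener are hypothetical names; "before_insert" listeners receive
# (mapper, connection, target):
#
#   def my_listener(mapper, connection, target):
#       pass
#
#   event.listen(SomeMappedClass, "before_insert", my_listener)
#   assert event.contains(SomeMappedClass, "before_insert", my_listener)
#   event.remove(SomeMappedClass, "before_insert", my_listener)
#   assert not event.contains(SomeMappedClass, "before_insert", my_listener)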
| gpl-3.0 |
jendap/tensorflow | tensorflow/python/ops/stateless_random_ops.py | 8 | 11692 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("StatelessMultinomial")
ops.NotDifferentiable("StatelessRandomNormal")
ops.NotDifferentiable("StatelessRandomUniform")
ops.NotDifferentiable("StatelessRandomUniformInt")
ops.NotDifferentiable("StatelessTruncatedNormal")
@tf_export("random.stateless_uniform")
def stateless_random_uniform(shape,
seed,
minval=0,
maxval=None,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a uniform distribution.
This is a stateless version of `tf.random_uniform`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the
range of random values to generate. Defaults to 1 if `dtype` is floating
point.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.int32, dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "stateless_random_uniform",
[shape, seed, minval, maxval]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
return gen_stateless_random_ops.stateless_random_uniform_int(
shape, seed=seed, minval=minval, maxval=maxval, name=name)
else:
rnd = gen_stateless_random_ops.stateless_random_uniform(
shape, seed=seed, dtype=dtype)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
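# Illustrative sketch (not part of the TensorFlow source): the determinism
# promised in the docstring above, assuming TF 1.x graph mode with a Session.
#
#   u1 = tf.random.stateless_uniform([2, 3], seed=[1, 2])
#   u2 = tf.random.stateless_uniform([2, 3], seed=[1, 2])
#   with tf.Session() as sess:
#       a, b = sess.run([u1, u2])
#   # a == b element-wise, because both ops were given the same seed tensor.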
@tf_export("random.stateless_normal")
def stateless_random_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a normal distribution.
This is a stateless version of `tf.random_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateless_random_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export("random.stateless_truncated_normal")
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.truncated_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_truncated_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_truncated_normal(
shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export(v1=["random.stateless_multinomial"])
@deprecation.deprecated(
date=None, instructions="Use tf.random.stateless_categorical instead.")
def stateless_multinomial(logits,
num_samples,
seed,
output_dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a multinomial distribution.
This is a stateless version of `tf.multinomial`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_multinomial(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
output_dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_multinomial", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples,
output_dtype, seed)
@tf_export("random.stateless_categorical")
def stateless_categorical(logits,
num_samples,
seed,
dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a categorical distribution.
This is a stateless version of `tf.categorical`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_categorical(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_categorical", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples, dtype,
seed)
def stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed):
"""Implementation for stateless multinomial/categorical ops (v1/v2)."""
logits = ops.convert_to_tensor(logits, name="logits")
return gen_stateless_random_ops.stateless_multinomial(
logits, num_samples, seed, output_dtype=dtype)
| apache-2.0 |
kustodian/ansible | lib/ansible/module_utils/network/ftd/fdm_swagger_client.py | 19 | 26649 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.network.ftd.common import HTTPMethod
from ansible.module_utils.six import integer_types, string_types, iteritems
FILE_MODEL_NAME = '_File'
SUCCESS_RESPONSE_CODE = '200'
DELETE_PREFIX = 'delete'
class OperationField:
URL = 'url'
METHOD = 'method'
PARAMETERS = 'parameters'
MODEL_NAME = 'modelName'
DESCRIPTION = 'description'
RETURN_MULTIPLE_ITEMS = 'returnMultipleItems'
TAGS = "tags"
class SpecProp:
DEFINITIONS = 'definitions'
OPERATIONS = 'operations'
MODELS = 'models'
MODEL_OPERATIONS = 'model_operations'
class PropName:
ENUM = 'enum'
TYPE = 'type'
REQUIRED = 'required'
INVALID_TYPE = 'invalid_type'
REF = '$ref'
ALL_OF = 'allOf'
BASE_PATH = 'basePath'
PATHS = 'paths'
OPERATION_ID = 'operationId'
SCHEMA = 'schema'
ITEMS = 'items'
PROPERTIES = 'properties'
RESPONSES = 'responses'
NAME = 'name'
DESCRIPTION = 'description'
class PropType:
STRING = 'string'
BOOLEAN = 'boolean'
INTEGER = 'integer'
NUMBER = 'number'
OBJECT = 'object'
ARRAY = 'array'
FILE = 'file'
class OperationParams:
PATH = 'path'
QUERY = 'query'
class QueryParams:
FILTER = 'filter'
class PathParams:
OBJ_ID = 'objId'
def _get_model_name_from_url(schema_ref):
path = schema_ref.split('/')
return path[len(path) - 1]
class IllegalArgumentException(ValueError):
"""
Exception raised when the function parameters:
- not all passed
- empty string
- wrong type
"""
pass
class ValidationError(ValueError):
pass
class FdmSwaggerParser:
_definitions = None
_base_path = None
def parse_spec(self, spec, docs=None):
"""
This method simplifies a swagger format, resolves a model name for each operation, and adds documentation for
each operation and model if it is provided.
:param spec: An API specification in the swagger format, see
<https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md>
:type spec: dict
        :param docs: A documentation map containing descriptions for models, operations and operation parameters.
:type docs: dict
:rtype: dict
:return:
Ex.
The models field contains model definition from swagger see
<#https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#definitions>
{
'models':{
'model_name':{...},
...
},
'operations':{
'operation_name':{
'method': 'get', #post, put, delete
'url': '/api/fdm/v2/object/networks', #url already contains a value from `basePath`
'modelName': 'NetworkObject', # it is a link to the model from 'models'
# None - for a delete operation or we don't have information
# '_File' - if an endpoint works with files
'returnMultipleItems': False, # shows if the operation returns a single item or an item list
'parameters': {
'path':{
'param_name':{
'type': 'string'#integer, boolean, number
'required' True #False
}
...
},
'query':{
'param_name':{
'type': 'string'#integer, boolean, number
'required' True #False
}
...
}
}
},
...
},
'model_operations':{
'model_name':{ # a list of operations available for the current model
'operation_name':{
... # the same as in the operations section
},
...
},
...
}
}
"""
self._definitions = spec[SpecProp.DEFINITIONS]
self._base_path = spec[PropName.BASE_PATH]
operations = self._get_operations(spec)
if docs:
operations = self._enrich_operations_with_docs(operations, docs)
self._definitions = self._enrich_definitions_with_docs(self._definitions, docs)
return {
SpecProp.MODELS: self._definitions,
SpecProp.OPERATIONS: operations,
SpecProp.MODEL_OPERATIONS: self._get_model_operations(operations)
}
@property
def base_path(self):
return self._base_path
def _get_model_operations(self, operations):
model_operations = {}
for operations_name, params in iteritems(operations):
model_name = params[OperationField.MODEL_NAME]
model_operations.setdefault(model_name, {})[operations_name] = params
return model_operations
def _get_operations(self, spec):
paths_dict = spec[PropName.PATHS]
operations_dict = {}
for url, operation_params in iteritems(paths_dict):
for method, params in iteritems(operation_params):
operation = {
OperationField.METHOD: method,
OperationField.URL: self._base_path + url,
OperationField.MODEL_NAME: self._get_model_name(method, params),
OperationField.RETURN_MULTIPLE_ITEMS: self._return_multiple_items(params),
OperationField.TAGS: params.get(OperationField.TAGS, [])
}
if OperationField.PARAMETERS in params:
operation[OperationField.PARAMETERS] = self._get_rest_params(params[OperationField.PARAMETERS])
operation_id = params[PropName.OPERATION_ID]
operations_dict[operation_id] = operation
return operations_dict
def _enrich_operations_with_docs(self, operations, docs):
def get_operation_docs(op):
op_url = op[OperationField.URL][len(self._base_path):]
return docs[PropName.PATHS].get(op_url, {}).get(op[OperationField.METHOD], {})
for operation in operations.values():
operation_docs = get_operation_docs(operation)
operation[OperationField.DESCRIPTION] = operation_docs.get(PropName.DESCRIPTION, '')
if OperationField.PARAMETERS in operation:
param_descriptions = dict((
(p[PropName.NAME], p[PropName.DESCRIPTION])
for p in operation_docs.get(OperationField.PARAMETERS, {})
))
for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.PATH].items():
params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.QUERY].items():
params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
return operations
def _enrich_definitions_with_docs(self, definitions, docs):
for model_name, model_def in definitions.items():
model_docs = docs[SpecProp.DEFINITIONS].get(model_name, {})
model_def[PropName.DESCRIPTION] = model_docs.get(PropName.DESCRIPTION, '')
for prop_name, prop_spec in model_def.get(PropName.PROPERTIES, {}).items():
prop_spec[PropName.DESCRIPTION] = model_docs.get(PropName.PROPERTIES, {}).get(prop_name, '')
prop_spec[PropName.REQUIRED] = prop_name in model_def.get(PropName.REQUIRED, [])
return definitions
def _get_model_name(self, method, params):
if method == HTTPMethod.GET:
return self._get_model_name_from_responses(params)
elif method == HTTPMethod.POST or method == HTTPMethod.PUT:
return self._get_model_name_for_post_put_requests(params)
elif method == HTTPMethod.DELETE:
return self._get_model_name_from_delete_operation(params)
else:
return None
@staticmethod
def _return_multiple_items(op_params):
"""
Defines if the operation returns one item or a list of items.
:param op_params: operation specification
:return: True if the operation returns a list of items, otherwise False
"""
try:
schema = op_params[PropName.RESPONSES][SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
return PropName.ITEMS in schema[PropName.PROPERTIES]
except KeyError:
return False
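    # Note (added for clarity, not from the original module): the lookup above
    # expects operation params shaped roughly like
    #   {'responses': {'200': {'schema': {'properties': {'items': {...}}}}}}
    # so a missing 'items' property means the endpoint returns a single object.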
def _get_model_name_from_delete_operation(self, params):
operation_id = params[PropName.OPERATION_ID]
if operation_id.startswith(DELETE_PREFIX):
model_name = operation_id[len(DELETE_PREFIX):]
if model_name in self._definitions:
return model_name
return None
def _get_model_name_for_post_put_requests(self, params):
model_name = None
if OperationField.PARAMETERS in params:
body_param_dict = self._get_body_param_from_parameters(params[OperationField.PARAMETERS])
if body_param_dict:
schema_ref = body_param_dict[PropName.SCHEMA][PropName.REF]
model_name = self._get_model_name_byschema_ref(schema_ref)
if model_name is None:
model_name = self._get_model_name_from_responses(params)
return model_name
@staticmethod
def _get_body_param_from_parameters(params):
return next((param for param in params if param['in'] == 'body'), None)
def _get_model_name_from_responses(self, params):
responses = params[PropName.RESPONSES]
if SUCCESS_RESPONSE_CODE in responses:
response = responses[SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
if PropName.REF in response:
return self._get_model_name_byschema_ref(response[PropName.REF])
elif PropName.PROPERTIES in response:
ref = response[PropName.PROPERTIES][PropName.ITEMS][PropName.ITEMS][PropName.REF]
return self._get_model_name_byschema_ref(ref)
elif (PropName.TYPE in response) and response[PropName.TYPE] == PropType.FILE:
return FILE_MODEL_NAME
else:
return None
def _get_rest_params(self, params):
path = {}
query = {}
operation_param = {
OperationParams.PATH: path,
OperationParams.QUERY: query
}
for param in params:
in_param = param['in']
if in_param == OperationParams.QUERY:
query[param[PropName.NAME]] = self._simplify_param_def(param)
elif in_param == OperationParams.PATH:
path[param[PropName.NAME]] = self._simplify_param_def(param)
return operation_param
@staticmethod
def _simplify_param_def(param):
return {
PropName.TYPE: param[PropName.TYPE],
PropName.REQUIRED: param[PropName.REQUIRED]
}
def _get_model_name_byschema_ref(self, schema_ref):
model_name = _get_model_name_from_url(schema_ref)
model_def = self._definitions[model_name]
if PropName.ALL_OF in model_def:
return self._get_model_name_byschema_ref(model_def[PropName.ALL_OF][0][PropName.REF])
else:
return model_name
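# Illustrative usage sketch (not part of the original module). `swagger_dict`
# and `docs_dict` are hypothetical dicts already fetched from the device's API
# spec endpoints, and 'getNetworkObject' is a hypothetical operation id:
#
#   parser = FdmSwaggerParser()
#   spec = parser.parse_spec(swagger_dict, docs=docs_dict)
#   operation = spec['operations']['getNetworkObject']
#   model = spec['models'][operation['modelName']]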
class FdmSwaggerValidator:
def __init__(self, spec):
"""
:param spec: dict
data from FdmSwaggerParser().parse_spec()
"""
self._operations = spec[SpecProp.OPERATIONS]
self._models = spec[SpecProp.MODELS]
def validate_data(self, operation_name, data=None):
"""
Validate data for the post|put requests
:param operation_name: string
                            The value must be a non-empty string.
The operation name is used to get a model specification
:param data: dict
The value must be in the format that the model(from operation) expects
:rtype: (bool, string|dict)
:return:
(True, None) - if data valid
Invalid:
(False, {
'required': [ #list of the fields that are required but were not present in the data
'field_name',
                    'parent.field_name',# when the nested field is omitted
                    'parent.list[2].field_name' # if data is an array and one of the fields is omitted
],
'invalid_type':[ #list of the fields with invalid data
{
'path': 'objId', #field name or path to the field. Ex. objects[3].id, parent.name
'expected_type': 'string',# expected type. Ex. 'object', 'array', 'string', 'integer',
# 'boolean', 'number'
'actually_value': 1 # the value that user passed
}
]
})
:raises IllegalArgumentException
'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The data parameter must be a dict' if data is neither a dict nor None
'{operation_name} operation does not support' if the spec does not contain the operation
"""
if data is None:
data = {}
self._check_validate_data_params(data, operation_name)
operation = self._operations[operation_name]
model = self._models[operation[OperationField.MODEL_NAME]]
status = self._init_report()
self._validate_object(status, model, data, '')
if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
return False, self._delete_empty_field_from_report(status)
return True, None
def _check_validate_data_params(self, data, operation_name):
if not operation_name or not isinstance(operation_name, string_types):
raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
if not isinstance(data, dict):
raise IllegalArgumentException("The data parameter must be a dict")
if operation_name not in self._operations:
raise IllegalArgumentException("{0} operation does not support".format(operation_name))
def validate_query_params(self, operation_name, params):
"""
Validate params for the get requests. Use this method for validating the query part of the url.
:param operation_name: string
                            The value must be a non-empty string.
The operation name is used to get a params specification
:param params: dict
should be in the format that the specification(from operation) expects
Ex.
{
'objId': "string_value",
'p_integer': 1,
'p_boolean': True,
'p_number': 2.3
}
:rtype:(Boolean, msg)
:return:
(True, None) - if params valid
Invalid:
(False, {
'required': [ #list of the fields that are required but are not present in the params
'field_name'
],
'invalid_type':[ #list of the fields with invalid data and expected type of the params
{
'path': 'objId', #field name
'expected_type': 'string',#expected type. Ex. 'string', 'integer', 'boolean', 'number'
'actually_value': 1 # the value that user passed
}
]
})
:raises IllegalArgumentException
'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The params parameter must be a dict' if params is neither a dict nor None
'{operation_name} operation does not support' if the spec does not contain the operation
"""
return self._validate_url_params(operation_name, params, resource=OperationParams.QUERY)
def validate_path_params(self, operation_name, params):
"""
Validate params for the get requests. Use this method for validating the path part of the url.
:param operation_name: string
                            The value must be a non-empty string.
The operation name is used to get a params specification
:param params: dict
should be in the format that the specification(from operation) expects
Ex.
{
'objId': "string_value",
'p_integer': 1,
'p_boolean': True,
'p_number': 2.3
}
:rtype:(Boolean, msg)
:return:
(True, None) - if params valid
Invalid:
(False, {
'required': [ #list of the fields that are required but are not present in the params
'field_name'
],
'invalid_type':[ #list of the fields with invalid data and expected type of the params
{
'path': 'objId', #field name
'expected_type': 'string',#expected type. Ex. 'string', 'integer', 'boolean', 'number'
'actually_value': 1 # the value that user passed
}
]
})
:raises IllegalArgumentException
'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The params parameter must be a dict' if params is neither a dict nor None
'{operation_name} operation does not support' if the spec does not contain the operation
"""
return self._validate_url_params(operation_name, params, resource=OperationParams.PATH)
def _validate_url_params(self, operation, params, resource):
if params is None:
params = {}
self._check_validate_url_params(operation, params)
operation = self._operations[operation]
if OperationField.PARAMETERS in operation and resource in operation[OperationField.PARAMETERS]:
spec = operation[OperationField.PARAMETERS][resource]
status = self._init_report()
self._check_url_params(status, spec, params)
if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
return False, self._delete_empty_field_from_report(status)
return True, None
else:
return True, None
def _check_validate_url_params(self, operation, params):
if not operation or not isinstance(operation, string_types):
raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
if not isinstance(params, dict):
raise IllegalArgumentException("The params parameter must be a dict")
if operation not in self._operations:
raise IllegalArgumentException("{0} operation does not support".format(operation))
def _check_url_params(self, status, spec, params):
for prop_name in spec.keys():
prop = spec[prop_name]
if prop[PropName.REQUIRED] and prop_name not in params:
status[PropName.REQUIRED].append(prop_name)
continue
if prop_name in params:
expected_type = prop[PropName.TYPE]
value = params[prop_name]
if prop_name in params and not self._is_correct_simple_types(expected_type, value, allow_null=False):
self._add_invalid_type_report(status, '', prop_name, expected_type, value)
def _validate_object(self, status, model, data, path):
if self._is_enum(model):
self._check_enum(status, model, data, path)
elif self._is_object(model):
self._check_object(status, model, data, path)
def _is_enum(self, model):
return self._is_string_type(model) and PropName.ENUM in model
def _check_enum(self, status, model, value, path):
if value is not None and value not in model[PropName.ENUM]:
self._add_invalid_type_report(status, path, '', PropName.ENUM, value)
def _add_invalid_type_report(self, status, path, prop_name, expected_type, actually_value):
status[PropName.INVALID_TYPE].append({
'path': self._create_path_to_field(path, prop_name),
'expected_type': expected_type,
'actually_value': actually_value
})
def _check_object(self, status, model, data, path):
if data is None:
return
if not isinstance(data, dict):
self._add_invalid_type_report(status, path, '', PropType.OBJECT, data)
return None
if PropName.REQUIRED in model:
self._check_required_fields(status, model[PropName.REQUIRED], data, path)
model_properties = model[PropName.PROPERTIES]
for prop in model_properties.keys():
if prop in data:
model_prop_val = model_properties[prop]
expected_type = model_prop_val[PropName.TYPE]
actually_value = data[prop]
self._check_types(status, actually_value, expected_type, model_prop_val, path, prop)
def _check_types(self, status, actually_value, expected_type, model, path, prop_name):
if expected_type == PropType.OBJECT:
ref_model = self._get_model_by_ref(model)
self._validate_object(status, ref_model, actually_value,
path=self._create_path_to_field(path, prop_name))
elif expected_type == PropType.ARRAY:
self._check_array(status, model, actually_value,
path=self._create_path_to_field(path, prop_name))
elif not self._is_correct_simple_types(expected_type, actually_value):
self._add_invalid_type_report(status, path, prop_name, expected_type, actually_value)
def _get_model_by_ref(self, model_prop_val):
model = _get_model_name_from_url(model_prop_val[PropName.REF])
return self._models[model]
def _check_required_fields(self, status, required_fields, data, path):
missed_required_fields = [self._create_path_to_field(path, field) for field in
required_fields if field not in data.keys() or data[field] is None]
if len(missed_required_fields) > 0:
status[PropName.REQUIRED] += missed_required_fields
def _check_array(self, status, model, data, path):
if data is None:
return
elif not isinstance(data, list):
self._add_invalid_type_report(status, path, '', PropType.ARRAY, data)
else:
item_model = model[PropName.ITEMS]
for i, item_data in enumerate(data):
self._check_types(status, item_data, item_model[PropName.TYPE], item_model, "{0}[{1}]".format(path, i),
'')
@staticmethod
def _is_correct_simple_types(expected_type, value, allow_null=True):
def is_numeric_string(s):
try:
float(s)
return True
except ValueError:
return False
if value is None and allow_null:
return True
elif expected_type == PropType.STRING:
return isinstance(value, string_types)
elif expected_type == PropType.BOOLEAN:
return isinstance(value, bool)
elif expected_type == PropType.INTEGER:
is_integer = isinstance(value, integer_types) and not isinstance(value, bool)
is_digit_string = isinstance(value, string_types) and value.isdigit()
return is_integer or is_digit_string
elif expected_type == PropType.NUMBER:
is_number = isinstance(value, (integer_types, float)) and not isinstance(value, bool)
            is_numeric_str = isinstance(value, string_types) and is_numeric_string(value)
            return is_number or is_numeric_str
return False
@staticmethod
def _is_string_type(model):
return PropName.TYPE in model and model[PropName.TYPE] == PropType.STRING
@staticmethod
def _init_report():
return {
PropName.REQUIRED: [],
PropName.INVALID_TYPE: []
}
@staticmethod
def _delete_empty_field_from_report(status):
if not status[PropName.REQUIRED]:
del status[PropName.REQUIRED]
if not status[PropName.INVALID_TYPE]:
del status[PropName.INVALID_TYPE]
return status
@staticmethod
def _create_path_to_field(path='', field=''):
separator = ''
if path and field:
separator = '.'
return "{0}{1}{2}".format(path, separator, field)
@staticmethod
def _is_object(model):
return PropName.TYPE in model and model[PropName.TYPE] == PropType.OBJECT
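# Illustrative usage sketch (not part of the original module). `spec` is the
# dict produced by FdmSwaggerParser().parse_spec(); the operation name and
# payload are hypothetical:
#
#   validator = FdmSwaggerValidator(spec)
#   valid, report = validator.validate_data('addNetworkObject',
#                                           {'name': 'n1', 'subType': 'HOST'})
#   if not valid:
#       print(report)  # e.g. {'required': [...], 'invalid_type': [...]}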
| gpl-3.0 |
vc3-project/vc3-info-service | vc3infoservice/core.py | 1 | 11353 | #!/bin/env python
__author__ = "John Hover"
__copyright__ = "2017 John Hover"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.1"
__maintainer__ = "John Hover"
__email__ = "[email protected]"
__status__ = "Production"
import logging
import random
import string
class InfoConnectionFailure(Exception):
'''
Network connection failure exception.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoMissingPairingException(Exception):
'''
Exception thrown when a pairing code is invalid, either because it never existed
or the pairing has already been retrieved.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoEntityExistsException(Exception):
'''
Exception thrown when an attempt to create an entity with a
name that already exists. Old entity must be deleted first.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoEntityMissingException(Exception):
'''
Exception thrown when an attempt to get a non-existent entity is made.
Entity must be created before it can be updated.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoEntityUpdateMissingException(Exception):
'''
Exception thrown when an attempt to *update* a non-existent entity is made.
Entity must be created before it can be updated.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoAttributeFacade(object):
'''
Intercepts __setattr__ one level down for InfoEntities.
'''
def __init__(self, parent, attrname):
log = logging.getLogger()
object.__setattr__(self, '_parent', parent)
object.__setattr__(self, '_attrname', attrname)
log.debug("Facade made for attribute %s parent %s" % (attrname, parent))
def __setattr__(self, name, value):
'''
'''
log = logging.getLogger()
if name in self.__class__.infoattributes:
try:
diffmap = self._diffmap
except AttributeError:
diffmap = {}
for at in self.__class__.infoattributes:
diffmap[at] = 0
object.__setattr__(self,'_diffmap', diffmap)
diffmap[name] += 1
log.debug('infoattribute %s incremented to %s' % ( name, diffmap[name] ) )
else:
log.debug('non-infoattribute %s' % name)
object.__setattr__(self, name, value)
    def __getattr__(self, attrname):
        # Fall back to standard attribute lookup for names the facade does not
        # intercept.
        return object.__getattribute__(self, attrname)
class InfoEntity(object):
'''
Template for Information entities. Common functions.
Classes that inherit from InfoEntity must set class variables to describe handling.
'''
infokey = 'unset'
infoattributes = []
intattributes = []
validvalues = {}
nameattributes = ['name']
def __setattr__(self, name, value):
'''
        _diffmap counts how many times each (info)attribute has been set (not
        just initialized once).
'''
log = logging.getLogger()
if name in self.__class__.infoattributes:
try:
diffmap = self._diffmap
except AttributeError:
diffmap = {}
for at in self.__class__.infoattributes:
diffmap[at] = 0
object.__setattr__(self,'_diffmap', diffmap)
diffmap[name] += 1
else:
log.debug('non-infoattribute %s' % name)
object.__setattr__(self, name, value)
#def __getattr__(self, name):
# '''
# To be on the safe side, we track attributes that have been retrieved.
# Client may alter an object that is the value of the attribute.
# '''
# log = logging.getLogger()
# if name in self.__class__.infoattributes:
# try:
# diffmap = self._diffmap
# except AttributeError:
# diffmap = {}
# for at in self.__class__.infoattributes:
# diffmap[at] = 0
# object.__setattr__(self,'_diffmap', diffmap)
# diffmap[name] += 1
# log.debug('infoattribute %s' % name)
# else:
# log.debug('non-infoattribute %s' % name)
# object.__getattr__(self, name)
def getDiffInfo(self):
'''
Return a list of info attributes which have been set > 1 time.
'''
retlist = []
try:
diffmap = self._diffmap
except AttributeError:
            return retlist
for a in diffmap.keys():
if diffmap[a] > 1:
retlist.append(a)
return retlist
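    # Sketch of the diff tracking above (added for clarity, not from the
    # original module). `Request` is a hypothetical InfoEntity subclass whose
    # infoattributes include 'state':
    #
    #   e = Request(name='r1', state='new')   # each attribute set once
    #   e.state = 'validated'                 # 'state' set a second time
    #   e.getDiffInfo()                       # -> ['state']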
def __repr__(self):
s = "%s( " % self.__class__.__name__
for a in self.__class__.infoattributes:
val = getattr(self, a, None)
if isinstance(val, str) or isinstance(val, unicode):
if len(val) > 80:
s+="%s=%s... " % (a, val[:25] )
else:
s+="%s=%s " % (a, val )
else:
s+="%s=%s " % (a, val )
s += ")"
return s
def makeDictObject(self, newonly=False):
'''
Converts this Python object to attribute dictionary suitable for addition to existing dict
intended to be converted back to JSON. Uses <obj>.name as key:
'''
d = {}
d[self.name] = {}
if newonly:
# only copy in values that have been re-set after initialization
self.log.debug("newonly set, getting diff info...")
difflist = self.getDiffInfo()
for attrname in difflist:
d[self.name][attrname] = getattr(self, attrname)
else:
# copy in all infoattribute values
self.log.debug("newonly not set, doing all values...")
for attrname in self.infoattributes:
d[self.name][attrname] = getattr(self, attrname)
self.log.debug("Returning dict: %s" % d)
return d
def setState(self, newstate):
self.log.debug("%s object name=%s %s ->%s" % (self.__class__.__name__, self.name, self.state, newstate) )
self.state = newstate
def store(self, infoclient):
'''
Updates this Info Entity in store behind given infoclient.
'''
keystr = self.__class__.infokey
validvalues = self.__class__.validvalues
for keyattr in validvalues.keys():
validlist = validvalues[keyattr]
attrval = getattr(self, keyattr)
if attrval not in validlist:
self.log.warning("%s entity has invalid value '%s' for attribute '%s' " % (self.__class__.__name__,
attrval, keyattr) )
#resources = infoclient.getdocumentobject(key=keystr)
if hasattr(self, 'storenew'):
entdict = self.makeDictObject(newonly=False)
self.log.debug("Dict obj: %s" % entdict)
infoclient._storeentitydict(keystr, entdict )
else:
entdict = self.makeDictObject(newonly=True)
self.log.debug("Dict obj: %s" % entdict)
infoclient._mergeentitydict(keystr, entdict )
self.log.debug("Stored entity %s in key %s" % (self.name, keystr))
def addAcl(self, aclstring):
pass
def removeAcl(self, aclstring):
pass
def getClone(self, newname = None):
'''
Make new identical object with new name attribute.
'''
self.log.debug("making clone of %s object name=%s " % (self.__class__.__name__, self.name) )
dictobject = self.makeDictObject() # has name as index of attribute dict
dict = dictobject[self.name]
if newname is not None:
dict['name'] = newname
else:
dict['name'] = self.generateName()
self.log.debug('new dict is %s' % dict)
newobj = self.__class__.objectFromDict(dict)
newobj.storenew = True
self.log.debug('new object is %s' % newobj)
return newobj
def generateName(self, length=8):
'''
Make new name attribute appropriate to this object.
For parent InfoEntity, just generate a random string...
'''
self.log.debug("Generating name...")
randomstr = InfoEntity.randomChars(length)
self.log.debug("Got random part %s" % randomstr)
newname = ""
for na in self.__class__.nameattributes:
self.log.debug("Building name with %s " % na)
newname += InfoEntity.normalizeAttribute(getattr(self, na))
newname += "-%s" % randomstr
return newname
@classmethod
def objectFromDict(cls, dict):
'''
Returns an initialized Entity object from dictionary.
Input: Dict:
{
"name" : "<name>",
"att1" : "<val1>"
}
'''
log = logging.getLogger()
log.debug("Making object from dictionary...")
#name = dict.keys()[0]
#d = dict[name]
d = dict
args = {}
for key in cls.infoattributes:
try:
args[key] = d[key]
except KeyError, e:
args[key] = None
log.warning("Document object does not have a '%s' key" % e.args[0])
for key in cls.intattributes:
try:
if args[key] is not None:
args[key] = int(args[key])
except KeyError, e:
log.warning("Document object does not have a '%s' key" % e.args[0])
eo = cls(**args)
log.debug("Successfully made object from dictionary, returning...")
return eo
@classmethod
def randomChars(cls, length=5):
log = logging.getLogger()
log.debug("Generating random chars...")
randomstr = ''.join([random.choice(string.ascii_lowercase) for n in xrange(length)])
return randomstr
@classmethod
def normalizeAttribute(cls, value):
log = logging.getLogger()
log.debug("Normalizing %s " % value)
v = str(value)
v = v.lower()
v = v.replace(" ","")
v= v[0:16]
log.debug("Value normalized to %s" % v)
return v
class InfoPersistencePlugin(object):
def __init__(self, parent, config, section ):
self.log = logging.getLogger()
self.lock = MockLock()
self.parent = parent
self.config = config
self.section = section
class MockLock(object):
'''
Provided as a convenience for persistence back ends that don't require atomic operations.
'''
def acquire(self):
pass
def release(self):
pass
| gpl-3.0 |
2013Commons/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Prof.py | 37 | 75689 | #._cv_part guppy.heapy.Prof
from Tkinter import *
import tkFileDialog
import tkMessageBox
class MyVar(StringVar):
_default = 0.0
def set(self, value):
StringVar.set(self, '%.2g'%value)
suffixes = ('','K','M','G','T')
def sizestring(value):
value = float(value)
sign = 1
if value < 0:
sign = -1
value = - value
i = 0
while value > 99999:
value /= 1000
i += 1
s = str(int(round(value)))+suffixes[i]
if s.endswith('000'+suffixes[i]):
s = str(int(round(value/1000)))+suffixes[i+1]
if sign == -1:
s = '-' + s
return s
def percentstring(value):
a = abs(value)
if 10 <= a <= 9999:
return '%d'%round(value)
elif 0.01 <= a <= 10:
return '%.2g'%value
elif a <= 1e-10:
return '0'
else:
return '%.0e'%value
def stringsize(s):
if s.isdigit():
return int(s)
suf = s[-1:].upper()
mult = 1000l
for su in suffixes[1:]:
if su == suf:
break
mult *= 1000
else:
raise ValueError
return int(s[:-1])*mult
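# Expected behaviour of the three helpers above (illustrative, Python 2
# semantics assumed):
#
#   sizestring(123456)     -> '123K'
#   stringsize('2M')       -> 2000000
#   percentstring(0.1234)  -> '0.12'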
class Menu(Menu):
# A fix for the .delete() method in Menu.
# To delete commands defined in the menu items deleted.
# Also changed the comment: INDEX2 is actually INCLUDED.
def delete(self, index1, index2=None):
"""Delete menu items between INDEX1 and INDEX2 (included)."""
if index2 is None:
index2 = index1
# First find out what entries have defined commands.
cmds = []
for i in range(self.index(index1), self.index(index2)+1):
c = str(self.entrycget(i, 'command'))
if c in self._tclCommands:
# I don't want to delete the command already, since it
# seems mystical to do that while the entry is not yet deleted.
cmds.append(c)
# Delete the menu entries.
self.tk.call(self._w, 'delete', index1, index2)
# Now that the menu entries have been deleted,
# we can delete their commands.
for c in cmds:
self.deletecommand(c)
class SizeVar(StringVar):
_default = 0.0
def set(self, value):
self._value = value
s = sizestring(value)
StringVar.set(self, s)
class ValueLabel(Label):
def __init__(self, *args, **kwds):
kwds['width']=10
Label.__init__(self, *args, **kwds)
class ClickButton(Button):
# Button that runs the command directly at the click, not at release.
# And has auto-repeat.
def __init__(self, master, command, firstdelay=500,thendelay=150, **kwds):
Button.__init__(self, master, **kwds)
self._command = command
self._firstdelay = firstdelay
self._thendelay = thendelay
self.bind('<Button-1>', self._event_button)
self.bind('<ButtonRelease-1>', self._event_release)
def _event_button(self, event=None):
self._command()
if event is not None:
delay = self._firstdelay
else:
delay = self._thendelay
self._after = self.after(delay, self._event_button)
def _event_release(self, event):
self.after_cancel(self._after)
del self._after
class Stats:
def __init__(self, mod, fn=None):
self.mod = mod
self.os = mod.os
self.md5 = mod.md5
self.fn = fn
def clear_cache(self):
# It is intended to be transparently
# automagically reopened when needed.
self.stats = None
del self.stats
def get_stats(self):
self.open(self.fn)
return self.stats
stats = property(get_stats)
def collect(self):
if not self.fn:
return 0,0
stat = self.os.stat(self.fn)
if stat == self.laststat:
return len(self), 0
f = open(self.fn)
str = f.read(self.lastfilesize)
md5 = self.md5.md5(str)
digest = md5.digest()
if digest == self.lastdigest:
numoldstats = len(self)
else:
self.loadstr(str, reset=1)
numoldstats = 0
str = f.read()
self.laststat = self.os.fstat(f.fileno())
f.close()
self.lastfilesize = self.laststat.st_size
md5.update(str)
self.lastdigest = md5.digest()
self.loadstr(str)
numnewstats = len(self.stats)-numoldstats
return numoldstats, numnewstats
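    # Note (added for clarity): collect() re-reads only the newly appended tail
    # when the MD5 digest of the previously read prefix is unchanged; if the
    # file was truncated or rewritten, the digest differs and the whole file is
    # reloaded via loadstr(..., reset=1).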
def open(self, fn):
if not fn:
self.len_stats = 0
self.stats = []
self.max_size = 0
self.fn = fn
return
f = open(fn)
str = f.read()
lastdigest = self.md5.md5(str).digest()
laststat = self.os.fstat(f.fileno())
f.close()
self.loadstr(str, reset=1)
# Update these only if there was no exception so far.
self.fn = fn
self.lastdigest = lastdigest
self.laststat = laststat
self.lastfilesize = laststat.st_size
def loadstr(self, str, reset=0):
stats = []
lines = str.split('\n')
del str
linesiter = iter(lines)
max_size = 0
while 1:
try:
st = self.mod.Use.load(linesiter)
except StopIteration:
break
stats.append(st)
if st.size > max_size:
max_size = st.size
# Only update self if there were no exception so far
if reset:
self.stats = []
self.max_size = 0
self.max_size = max(self.max_size, max_size)
self.stats.extend(stats)
self.len_stats = len(self.stats)
def __getitem__(self, idx):
return self.stats[idx]
def __len__(self):
try:
return self.len_stats
except AttributeError:
self.len_stats = len(self.stats)
return self.len_stats
def get_max_size(self):
return self.max_size
class ProfileRow:
kindwidth = 30
def __init__(self, master, row, usecolor=1):
self.master = master
self.row = row
if usecolor:
colbg = Frame(master=master,bg='black',width=1, borderwidth=1, relief=GROOVE)
self.color = Label(master=colbg,bg='white',width=1, borderwidth=1, relief=GROOVE)
self.color.grid(row=0, column=0)
colbg.grid(row=row,column=0, sticky=NW)
self.rsizevar = SizeVar()
self.rsize = Label(master=master, textvariable=self.rsizevar, width=6,anchor=E)
self.rpercentvar = StringVar() #BBIntVar()
self.rpercent = Label(master=master,textvariable=self.rpercentvar, width=3,anchor=E)
self.dsizevar = SizeVar()
self.dsize = Label(master=master, textvariable=self.dsizevar, width=6,anchor=E)
self.dpercentvar = StringVar() #BBIntVar()
self.dpercent = Label(master=master,textvariable=self.dpercentvar, width=3,anchor=E)
self.kindvar = StringVar()
self.kind = Label(master=master, textvariable=self.kindvar, anchor=NW,
width=self.kindwidth ,justify=LEFT)
self.rsize.grid(row=row, column=1, sticky=NE)
self.rpercent.grid(row=row,column=2,sticky=NE)
self.dsize.grid(row=row,column=3,sticky=NE)
self.dpercent.grid(row=row,column=4,sticky=NE)
self.kind.grid(row=row, column=5, sticky=NW)
def set_color_size_percent_kind(self, color, rsize, rpercent, dsize, dpercent, kind):
self.set_color(color)
if color is not None:
self.set_color(color)
self.rsizevar.set(rsize)
if rpercent is None:
rpercent = ''
else:
rpercent = str(int(round(rpercent)))
self.rpercentvar.set(rpercent)
self.dsizevar.set(dsize)
dpercent = str(int(round(dpercent)))
self.dpercentvar.set(dpercent)
self.set_kind(kind)
def set_color(self, color):
self.color.configure(bg=color)
def set_kind(self, kind):
self.kindtext = kind
if len(kind) > self.kindwidth:
import textwrap
kind = textwrap.fill(kind, width=self.kindwidth)
self.kindvar.set(kind)
def clear(self):
self.set_color_size_percent_kind(self.master['bg'], 0, 0, 0, 0, '--')
class AxisControl:
scale_table = [1l, 2l, 5l]
while scale_table[-1] < 1e12:
scale_table.append(scale_table[-3] * 10l)
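    # Note (added for clarity): the loop above expands scale_table into the
    # 1-2-5 series 1, 2, 5, 10, 20, 50, ... stopping once the last entry
    # reaches 1e12; the range +/- buttons step through this table.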
def __init__(self, master,
name,
range,
grid,
unit,
rangecommand,
gridcommand,
autocommand=None
):
small = 0
self.name = name
self.unit = unit
self.range = range
self.rangecommand = rangecommand
self.frame = frame = Frame(master,borderwidth=2,relief=GROOVE)
self.rangevar = SizeVar()
self.rangevar.set(range)
if 1:
rangeval = Entry(master=self.frame,
# anchor=E,
width=4,
textvar=self.rangevar,
#font=('fixed', '16', 'bold'),
#font=('terminal', '16', 'bold'),
#font=('terminal', '14'),
font=('fixed', '14'),
#bg='black',fg='yellow'
bg='#fdd'
)
rangeval.bind('<KeyPress-Return>',self.event_range_enter)
elif 1:
rangeval = Button(master=self.frame,
anchor=E,
width=4,
textvar=self.rangevar,
#font=('fixed', '16', 'bold'),
font=('terminal', '16', 'bold'),
bg='black',fg='yellow')
else:
rangeval = Listbox(
self.frame,
height=1,
width=4,
font=('terminal', '16', 'bold'),
bg='black',fg='yellow')
for scale in self.scale_table:
s = sizestring(scale)
rangeval.insert(0, s)
namelabel = Menubutton(frame, text=name, relief='raised', anchor=W)
namemenu = Menu(namelabel)
namelabel['menu']=namemenu
if autocommand:
self.autovar = BooleanVar()
self.autovar.set(True)
namemenu.add_checkbutton(
#autobutton = Checkbutton(frame,
label='Auto',
variable=self.autovar,
command = autocommand,
#relief=RAISED
)
autobutton = Checkbutton(frame,
text='Auto',
variable=self.autovar,
command = autocommand,
relief=RAISED
)
else:
self.autovar = None
if gridcommand:
self.gridvar = BooleanVar()
self.gridvar.set(grid)
namemenu.add_checkbutton(
label='Grid',
variable=self.gridvar,
command = lambda: gridcommand(self.gridvar.get()),
)
gridbutton = Checkbutton(frame,
text='Grid',
variable=self.gridvar,
command = lambda: gridcommand(self.gridvar.get()),
relief=RAISED
)
rangelabel = Label(frame, text='Range')
if name == 'Y' and small:
padx = 5
pady = 0
else:
padx = 3
pady = 3
ud = Frame(frame)
rangeup = ClickButton(ud, text='+',
pady=pady,padx=padx,
font=('fixed',8),
command=lambda:self.range_button(1))
rangedown = ClickButton(ud, text='-',
pady=pady,padx=padx,
font=('fixed',8),
command=lambda:self.range_button(-1))
rangedown.grid(row=0,column=0)
rangeup.grid(row=0,column=1)
row=0
if small and name == 'Y':
namelabel.grid(row=0, rowspan=1,column=0)
rangeup.grid(row=0, column=1, sticky=W)
autobutton.grid(row=1,column=0)
rangedown.grid(row=1, column=1, sticky=W)
rangeval.grid(row=2, column=0, columnspan=2,sticky=W,padx=3, pady=3)
elif small and name == 'X':
namelabel.grid(row=0, column=0)
rangeval.grid(row=0, column=1,sticky=W,padx=3, pady=3)
rangedown.grid(row=0, column=2, sticky=W)
rangeup.grid(row=0, column=3, sticky=W)
else:
namelabel.grid(row=row, column=0, sticky=N+W,ipadx=0,ipady=0,padx=2,pady=2)
rangelabel.grid(row=row, column=1, sticky=W)
ud.grid(row=row,column=2, padx=2)
row += 1
if gridcommand:
gridbutton.grid(row=row, column=0, sticky=W)
rangeval.grid(row=row, column=1, padx=3, pady=3)
if autocommand:
pass
autobutton.grid(row=row,column=2)
def cmd_range(self):
pass
def event_range_enter(self, event):
str = self.rangevar.get()
try:
rng = stringsize(str)
if rng not in self.scale_table:
if not 1 <= rng <= self.scale_table[-1]:
raise ValueError
except:
self.frame.bell()
self.errorbox("""\
Invalid range entry.
It should be a positive integer with an optional multiplier:
K, M, G, or T
(1000, 1e6, 1e9, 1e12)
Maximum range is 1T.""")
self.rangevar.set(self.range)
else:
if self.autovar:
self.autovar.set(False)
self.setrange(rng)
def auto_command(self):
pass
def errorbox(self, msg):
tkMessageBox.showerror(master=self.frame, message=msg)
def fit(self, range):
range = self.scale_by_table(range)
self.setrange(range)
def range_button(self, d):
if self.autovar:
self.autovar.set(False)
self.range_change(d)
def range_change(self, d):
range = self.range
srange = self.scale_by_table(range)
if srange > range:
if d > 0:
d -= 1
i = self.scale_table.index(srange)
i += d
if i >= len(self.scale_table):
i = len(self.scale_table) - 1
if i < 0:
i = 0
self.setrange(self.scale_table[i])
def setrange(self, range):
if range != self.range:
self.range = range
self.rangevar.set(range)
self.rangecommand(range)
def scale_by_table(self, s):
# Return the scale from table that is higher or equal to s
for ts in self.scale_table:
if ts >= s:
return ts
return self.scale_table[-1]
WM = 1
class Marker:
def __init__(self, d, tag, name, pos, poscommand=None):
self.d = d
self.tag = tag
self.name = name
self.xmarker = pos
self.butdown = 0
self.ocursor = d.ocursor
self.cursor = self.ocursor
self.poscommand = None
self.intpos = None
self.moving = 0
self.selected = 0
self.entered = 0
self.butdownselected = 0
self.motion_id = None
self.create()
def bind(self, sequence, function):
tag = self.tag
self.d.drawingarea.tag_bind(tag, sequence, function)
if WM:
self.xlabel.bind(sequence, function)
else:
self.d.xmarks.tag_bind(tag, sequence, function)
def coords(self, canx):
self.d.drawingarea.coords(self.tag,
canx, 0,
canx,-int(self.d.boty))
self.d.xmarks.coords(self.tag, canx, 10)
def create(self):
tag = self.tag
text = self.name
pos = 0
if 1:
self.d.drawingarea.create_line(pos, 0, pos, 20-self.d.boty, stipple='gray12',
width=4,tags=(tag,))
if WM:
label = self.xlabel = Label(self.d.xmarks, text=text, padx=2,pady=2,relief=RAISED)
self.d.xmarks.create_window(pos, 0, window=label, tags=(tag,))
else:
self.d.xmarks.create_text(pos, 0, text=text, tags=(tag,))
self.bind('<Button-1>', self.event_button_1)
self.bind('<ButtonRelease-1>', self.event_button_1_release)
self.bind('<Enter>', self.event_enter)
self.bind('<Leave>', self.event_leave)
self.d.drawingarea.bind('<Enter>', self.event_enter_movearea, add='+')
self.d.drawingarea.bind('<Button-1>', self.event_button_1_movearea, add='+')
def event_button_1(self, event):
self.butdown = 1
if self.selected:
self.butdownselected = 1
if self.moving:
self.event_stop_move(event)
else:
self.butdownselected = 0
self.has_moved = 0
self.event_selected(event)
self.event_start_move(event)
def event_button_1_movearea(self, event):
if not self.entered:
self.event_deselected(event)
def event_button_1_release(self, event):
self.butdown = 0
if self.has_moved == self.butdownselected:
if self.selected:
if self.moving and not (self.disloy <= event.y_root < self.dishiy):
self.event_stop_move(None)
self.setcursor(self.ocursor)
else:
self.setcursor(self.ocursor)
return
self.event_deselected(event)
def event_deselected(self, event):
if self.selected:
self.selected = 0
self.xlabel['relief'] = RAISED
if self.moving:
self.event_stop_move(event)
def event_enter(self, event):
self.entered = 1
if not self.moving:
if self.selected:
self.event_start_move(event)
else:
self.setcursor('hand2')
def event_enter_movearea(self, event):
if self.selected and not self.moving:
self.event_start_move(event)
def event_leave(self, event):
self.entered = 0
if not self.moving:
self.setcursor(self.ocursor)
elif not (self.fraloy <= event.y_root < self.frahiy):
pass
def event_motion(self, event):
self.has_moved = 1
if 0: # Simple variant - get back
if not (self.fraloy <= event.y_root < self.frahiy):
self.event_button_1_release(self.down_event)
return
inside = (self.fraloy <= event.y_root < self.frahiy)
if inside != self.inside:
self.inside = inside
if not inside:
self.out_event = event
self.event_stop_move(None)
if self.butdown:
self.setcursor('circle')
self.d.bind_motion(self.event_motion_downout)
else:
self.in_event = event
#self.delta += self.out_event.x_root - event.x_root
self.event_start_move(event)
return
if inside:
self.moved(event)
self.setxvars()
def event_motion_downout(self, event):
# We don't get an enter while button is pressed down
# Emulate an enter if we detect entering
inside = (self.fraloy <= event.y_root < self.frahiy)
if inside:
self.d.unbind_motion(self.event_motion_downout)
self.event_enter_movearea(event)
def event_selected(self, event):
for m in self.d.marks:
m.event_deselected(event)
self.selected = 1
self.xlabel['relief'] = SUNKEN
def event_start_move(self, event):
self.moving = 1
self.fralox = self.d.frame.winfo_rootx()
self.frahix = self.fralox + self.d.frame.winfo_width()
self.fraloy = self.d.frame.winfo_rooty()
self.frahiy = self.fraloy + self.d.frame.winfo_height()
self.dislox = self.d.drawingarea.winfo_rootx()
self.dishix = self.dislox + self.d.drawingarea.winfo_width()
self.disloy = self.d.drawingarea.winfo_rooty()
self.dishiy = self.disloy + self.d.drawingarea.winfo_height()
self.down_event = event
self.prev_event = event
self.down_xmarker = self.xmarker
self.down_xvfrac = self.d.drawingarea.xview()[0]
self.inside = 1
self.delta = 0
self.lift()
self.motion_id = self.d.bind_motion(self.event_motion)
self.moved(event)
def event_stop_move(self, event):
assert self.moving
self.moving = 0
self.d.unbind_motion(self.motion_id)
if event is not None:
self.moved(event)
self.setxvars()
if self.entered and not self.selected:
self.setcursor('hand2')
else:
self.setcursor(self.ocursor)
def lift(self):
self.d.xmarks.tag_raise(self.tag)
if WM:
self.xlabel.lift()
self.d.drawingarea.tag_raise(self.tag)
def move(self, sample):
canx = self.d.canxscaled(sample)
self.d.xview_pos(canx)
self.coords(canx)
self.xmarker = sample
self.lift()
def moved(self, event):
curx = event.x_root
cury = event.y_root
prevx = self.prev_event.x_root
if prevx > self.dishix and curx < self.dishix:
prevx = self.dishix
elif prevx < self.dislox and curx > self.dislox:
prevx = self.dislox
markx = self.d.canxscaled(self.xmarker) - self.d.drawingarea.canvasx(0) + self.dislox
dx = curx - prevx
l = r = 1
if self.xmarker >= self.d.numstats-1:
r = 0
if self.xmarker <= 0:
l = 0
stop = 0
        # Should we allow moving it back or not
# if it is at an endpoint?
# Here we don't move it at all, to make marker pos correspond
# more closely with mouse position.
if ((r == 0 and curx > markx) or (l == 0 and curx < markx)):
l = r = 0
if self.butdown:
if curx > self.dishix:
l = 0
elif curx < self.dislox:
r = 0
else:
if not (self.dislox <= curx < self.dishix and
self.disloy <= cury < self.dishiy):
l = r = 0
stop = 1
if l and r:
self.setcursor('sb_h_double_arrow')
elif l:
self.setcursor('sb_left_arrow')
if dx > 0:
dx = 0
elif r:
self.setcursor('sb_right_arrow')
if dx < 0:
dx = 0
else:
self.setcursor('dot')
dx = 0
self.prev_event = event
sample = self.d.limitx(self.xmarker + dx / self.d.xscale)
canx = self.d.canxscaled(sample)
self.d.xview_pos(canx)
self.coords(canx)
self.xmarker = sample
if stop and self.moving:
self.event_stop_move(None)
def set(self):
canx = self.d.canxscaled(self.xmarker)
self.coords(canx)
self.lift()
def set_poscommand(self, command):
self.poscommand = command
self.intpos = None
def setcursor(self, cursor):
if cursor != self.cursor:
self.xlabel['cursor'] = cursor
self.cursor = cursor
self.d.setcursor(cursor)
def setxvars(self):
if self.poscommand:
intpos = int(round(self.xmarker))
if intpos != self.intpos:
self.intpos = intpos
self.poscommand(intpos)
class Display:
orgwidth = 300
orgheight = 300
minwidth = 30
minheight = 30
def __init__(self, master,
scale_table,
numkindrows,
getkindcolor,
xrange=100,
yrange=100,
xgrid = False,
ygrid = False,
graphtype = 'Bars',
statype = 'Size',
):
self.master = master
self.scale_table = scale_table
self.numkindrows = numkindrows
self.getkindcolor = getkindcolor
self.xrange = xrange
self.yrange = yrange
self.xgrid = xgrid
        self.var_xgrid = BooleanVar()
        self.var_xgrid.set(xgrid)
        self.var_ygrid = BooleanVar()
        self.ygrid = ygrid
        self.var_ygrid.set(ygrid)
self.graphtype = graphtype
self.statype = statype
self.numstats = 0
self.ymaxs = []
self.ymins = []
self.ymax = 1
        # To get around problems with dynamic binding / unbinding of motion,
        # I handle it myself in the bind_motion method, using the following.
self.bound_motions = {}
self.event_motion_id = None
#
self.frame = frame = Frame(master,
borderwidth=3,
relief=SUNKEN,
#relief=GROOVE,
#background='green'
)
#self.frame = frame = Frame(master,background='green')
bordercolor = '#ccc'
screencolor = '#e0e0e0'
xscrollincrement = 1
frame = Frame(self.frame)
frame.grid(row=0,column=0)
#move = Frame(frame, height=10,width=10,background='red', relief=RAISED)
#move = Button(self.frame, height=10,width=10,background='red')
self.drawingarea = C = Canvas(frame,
width=self.orgwidth,
height=self.orgheight,
xscrollincrement=xscrollincrement,
#background='black',
background = screencolor,
bd=0,
xscrollcommand = self.xscrollbar_set,
#confine=False,
)
#self.yctrlframe = Frame(frame, borderwidth=2,relief=GROOVE)
self.yscrollbar = Scrollbar(frame, orient = VERTICAL, width=10)
#self.yscrollbar['command']=self.drawingarea.yview
#self.drawingarea['yscrollcommand'] = self.yscrollbar_set
#self.yscrollbar.pack(side=RIGHT,fill=Y)
#self.yctrlframe.grid(row = 0, column = 0,sticky=N+S,padx=3,pady=3)
self.xaxis = Canvas(frame,
width=C['width'],
height=20,
xscrollincrement=xscrollincrement,
bd=0,
background = bordercolor,
#xscrollcommand = self.xscrollbar_set
#confine=False,
)
self.xmarks = Canvas(frame,
width=C['width'],
height=20,
xscrollincrement=xscrollincrement,
bd=0,
background = bordercolor,
#xscrollcommand = self.xscrollbar_set
#confine=False,
)
self.yaxis = Canvas(frame, height=C['height'],width=50,
bd=0,
background = bordercolor,
)
self.xscrollbar = Scrollbar(frame, orient=HORIZONTAL,
command=self.drawingarea_xview,
width=12,
background = bordercolor,
)
xy = Canvas(frame, width=50,height=20,bd=0,
background = bordercolor,
)
#
if 0:
self.yaxis.grid(row = 0, column = 0)
self.yscrollbar.grid(row=0,column=2, sticky=N+S)
C.grid(row = 0, column = 1, sticky=W+E )
xy.grid(row=1,column=0)
self.xaxis.grid(row = 1, column = 1)
self.xscrollbar.grid(row=2,column=1,sticky=E+W)
self.rsbut.grid(row=2,column=2)
else:
var_yrange = SizeVar()
self.var_yrange = var_yrange
row = 0
Label(frame,
textvar=var_yrange,
bd=0,
relief=FLAT,
background=bordercolor).grid(
row=row,
column=0,
sticky=W+E+N+S)
self.xscrollbar.grid(row=row,column=1,sticky=E+W)
row += 1
self.yunit = Label(frame,
text='Bytes',
bd=0,
relief=FLAT,
background=bordercolor)
self.yunit.grid(
row=row,
column=0,
sticky=W+E+N+S)
self.xmarks.grid(row=row, column=1,sticky=W+E+N)
row += 1
self.yaxis.grid(row = row, column = 0)
C.grid(row = row, column = 1, sticky=W+E )
row += 1
xy.grid(row=row,column=0)
self.xaxis.grid(row = row, column = 1,sticky=W+E+N)
#
self.botx = float(C['width'])
self.boty = float(C['height'])
self.chdim = self.getchdim()
self.canx0 = 0
self.tmax = 0
self.xscale = self.botx / self.xrange
self.yscale = self.boty / self.yrange
self.xi0 = None
xy.create_line(0,2,44,2)
xy.create_line(49, 6,49,22)
xy.create_text(25, 14, text='Sample')
self.setscrollregion()
self.ocursor = self.drawingarea['cursor']
self.cursor = self.ocursor
self.marks = []
def bind_motion(self, function):
        if self.event_motion_id is None:
self.event_motion_id = self.frame.bind_all('<Motion>', self.event_motion, add='+')
self.bound_motions[function] = self.bound_motions.get(function, 0) + 1
return function
def event_motion(self, event):
for f in self.bound_motions.keys():
f(event)
def unbind_motion(self, funcid):
n = self.bound_motions[funcid] - 1
if n == 0:
del self.bound_motions[funcid]
else:
self.bound_motions[funcid] = n
def new_xmarker(self, name = None, pos=0):
tag = 'M%d'%len(self.marks)
if name is None:
name = tag
m = Marker(self, tag, name, pos)
self.marks.append(m)
return m
def canxscaled(self, x):
return x * self.xscale + self.canx0
def canyscaled(self, y):
return - y * self.yscale
def cmd_xgrid(self):
self.xgrid = self.var_xgrid.get()
self.drawxaxis()
def cmd_ygrid(self):
self.ygrid = self.var_ygrid.get()
self.drawyaxis()
def cmd_yrange_auto(self):
self.ymax = None
self.yrange_auto()
def limitx(self, x):
lo = 0
hi = max(0, self.numstats-1)
if x < lo:
return lo
if x > hi:
return hi
return x
def resize(self, dx, dy):
x = self.botx + dx
y = self.boty + dy
if x < self.minwidth:
x = self.minwidth
dx = x - self.botx
if y < self.minheight:
y = self.minheight
dy = y - self.boty
xv = self.drawingarea.xview()
yv = self.drawingarea.yview()
self.drawingarea.configure(width=x, height=y)
self.xaxis.configure(width=x)
self.xmarks.configure(width=x)
self.yaxis.configure(height=y)
xscale = float(x) / self.xrange
yscale = float(y) / self.yrange
xscaleorg = self.drawingarea.canvasx(0)
yscaleorg = 0
xq = xscale / self.xscale
yq = yscale / self.yscale
self.drawingarea.scale("all",xscaleorg, yscaleorg, xq, yq)
#self.drawingarea.scale("barsep",xscaleorg, yscaleorg, xq, yq)
#self.drawingarea.scale("xmarker",xscaleorg, yscaleorg, xq, yq)
self.canx0 = xscaleorg + (self.canx0 - xscaleorg) * xq
self.botx = x
self.boty = y
self.xscale = xscale
self.yscale = yscale
self.drawxaxis()
self.drawyaxis()
self.setscrollregion()
# If the size changed much, the canvas may scroll though it shouldn't.
# Notes 11 and 26 Oct 2005 .
# I save the current scroll position.
# The caller has to call the .moveback() method some time later.
self.wantedpos = xv[0]
return dx, dy
def moveback(self):
self.frame.update_idletasks()
self.xview(MOVETO, self.wantedpos)
    def draw(self):
self.drawxaxis()
self.drawyaxis()
def draw_stat(self, idx, stat):
graphtype = self.graphtype
statype = self.statype
rows = stat.get_rows_n_and_other(self.numkindrows, statype)
if statype == 'Size':
kindval = dict([(r.name, r.size) for r in rows])
else:
kindval = dict([(r.name, r.count) for r in rows])
order = [r.name for r in rows]
order.reverse()
lastkindval = self.lastkindval
self.lastkindval = kindval
C = self.drawingarea
yscale = self.yscale
xscale = self.xscale
x0 = idx * xscale - 0.5 * xscale + self.canx0
x1 = x0 + xscale
ymax = 0
ymin = 0
y = 0
bw = 0.05*xscale
ocolor = None
for k in order:
dy = kindval.get(k, 0)
if not dy:
continue
color = self.getkindcolor(k)
if graphtype == 'Bars':
line = C.create_rectangle(x0+bw, -y*yscale,
x1-bw, -(y+dy)*yscale,
fill=color,
outline=color,
width = 0,
tags=("a",))
if color == ocolor:
C.create_line(x0, -(y)*yscale,
x1, -(y)*yscale,
fill='black',
tags=('barsep',))
ocolor = color
y += dy
elif graphtype == 'Lines':
if dy > ymax:
ymax = dy
elif dy < ymin:
ymin = dy
y0 = lastkindval.get(k)
if y0 is None:
y0 = dy
x00 = x0
else:
x00 = x0 - 0.4 * xscale
C.create_line(x00, - y0 * yscale,
x1 - 0.6 * xscale, - dy * yscale,
fill=color,
tags=('a',))
if 1:
C.create_line(x1 - 0.6 * xscale, - dy * yscale,
x1 - 0.4 * xscale, - dy * yscale,
fill=color,
width = 4,
tags=('a',))
else:
C.create_rectangle(x1 - 0.6 * xscale, - dy * yscale,
x1 - 0.4 * xscale, - dy * yscale,
fill=color,
outline=color,
width = 2,
tags=('a',))
if graphtype == 'Bars':
if y > ymax:
ymax = y
elif y < ymin:
ymin = y
assert idx == len(self.ymaxs) == len(self.ymins)
self.ymaxs.append(ymax)
self.ymins.append(ymin)
if idx > self.tmax:
self.tmax = idx
def drawingarea_xview(self, cmd, what, unit=None):
if cmd == 'scroll' and unit == 'units':
what = int(max(2, self.xscale)*int(what))
self.xview(cmd, what, unit)
def setcursor(self, cursor):
if cursor != self.cursor:
self.drawingarea['cursor'] = cursor
self.master['cursor'] = cursor
self.cursor = cursor
def xmarkers_set(self):
for m in self.marks:
m.set()
def xview(self, *args):
if not args:
return self.drawingarea.xview()
self.drawingarea.xview(*args)
self.xaxis.xview(*args)
self.xmarks.xview(*args)
def xview_moveto(self, fraction):
self.xview(MOVETO, fraction)
def xview_pos(self, pos, fraction=None, leftmargin = 5, rightmargin = 5):
# Scroll canvas view, if necessary, so that something
# (eg an x marker) at canvas position pos will be visible
# with minimum specified margin at left and right.
# Scroll relative to fraction; default is current xview position.
if fraction is None:
fraction = self.xview()[0]
x1, y1, x2, y2 = self.scrollregion
cc = x1 + fraction * (x2 - x1)
xm = pos - cc
lo = leftmargin
hi = self.botx - rightmargin
if xm < lo:
dx = xm - lo
xm = lo
elif xm >= hi:
dx = (xm - hi)
xm = hi
else:
dx = 0
r = fraction + dx / float(x2 - x1)
self.xview_moveto(r)
def drawxaxis(self):
scale_table = self.scale_table
self.xaxis.delete('all')
self.drawingarea.delete('xgrid')
x1, y1, x2, y2 = self.scrollregion
chdx, chdy = self.chdim
i = 0
while (scale_table[i] * self.xscale <
min(5, len(str(scale_table[i] * self.tmax))) * chdx):
i+=1
self.xstep = scale_table[i]
divisuf = (
(1000000000000l, '%dT'),
(1000000000l, '%dG'),
(1000000, '%dM'),
(1000, '%dK'),
(1, '%d')
)
for divi, form in divisuf:
if self.xstep >=divi:
break
self.xdivi = divi
self.xform = form
self.xi0 = 0
self.updatexaxis()
def updatexaxis(self):
chdx, chdy = self.chdim
step = self.xstep
gridon = self.xgrid
for i in range(self.xi0, self.tmax+step, step):
x = self.canx0 + i*self.xscale
self.xaxis.create_line(x, 0, x, 4)
if gridon:
self.drawingarea.create_line(x, 0, x, -self.boty,
tags=('xgrid',),width=2,stipple="gray25")
text = self.xform%(i / self.xdivi)
self.xaxis.create_text(x, chdy, text=text)
self.xaxis.create_line(self.canx0 + self.xi0*self.xscale, 1, x+self.xscale, 1)
self.xi0 = i
self.xmarkers_set()
def drawyaxis(self):
gridon = self.ygrid
self.yaxis.delete('all')
self.drawingarea.delete('ygrid')
chdx, chdy = self.getchdim()
width = int(self.yaxis['width'])
i = 0
maxval = self.yrange
while (self.scale_table[i] * self.yscale < 1.5 * chdy):
i+=1
step = self.scale_table[i]
divisuf = (
(1000000000000l, '%4dT'),
(1000000000l, '%4dG'),
(1000000, '%4dM'),
(1000, '%4dK'),
(1, '%5d')
)
for divi, form in divisuf:
if step >=divi:
break
for i in range(0, maxval+step, step):
y = - i*self.yscale
self.yaxis.create_line(width-3, y, width-1, y)
if gridon:
self.drawingarea.create_line(self.scrollregion[0], y,
self.scrollregion[2], y,
stipple="gray25",
tags=('ygrid',))
if 0 and i == 0:
text = '0 bytes'
else:
text = form % (i / divi)
self.yaxis.create_text(chdx*2.5, y-0.5*chdy, text=text)
#self.yaxis.create_text(chdx*2.5, 0.5*chdy, text='bytes')
self.yaxis.create_line(width-1, 0, width-1, -self.boty)
self.xmarkers_set()
def getchdim(self):
ch = self.xaxis.create_text(0, 0, text='0')
x1, y1, x2, y2 = self.xaxis.bbox(ch)
self.xaxis.delete(ch)
chdx = abs(x2 - x1)
chdy = abs(y2 - y1)
return chdx, chdy
def load_stats(self, stats):
ocursor = self.frame.winfo_toplevel()['cursor']
try:
self.frame.winfo_toplevel()['cursor'] = 'watch'
self.frame.update()
self.numstats = len(stats)
self.lastkindval = {}
self.tmax = 0
self.ymax = None
self.ymaxs = []
self.ymins = []
C = self.drawingarea
C.delete('barsep')
C.delete('a')
for (i, st) in enumerate(stats):
self.draw_stat(i, st)
try:
self.drawingarea.tag_raise('barsep', 'a')
except TclError:
pass # May be 'tagOrId "a" doesn't match any items' if empty!
self.drawxaxis()
self.drawyaxis()
self.xmarkers_set()
self.yrange_auto()
finally:
self.frame.winfo_toplevel()['cursor'] = ocursor
def add_stats(self, stats):
for (i, st) in enumerate(stats):
self.draw_stat(i+self.numstats, st)
self.numstats += len(stats)
self.updatexaxis()
self.setscrollregion()
def setxgrid(self, grid):
self.xgrid = grid
self.drawxaxis()
def setygrid(self, grid):
self.ygrid = grid
self.drawyaxis()
def setgraphtype(self, gmode, stats):
graphtype, statype = gmode.split(' ')
if graphtype != self.graphtype or statype != self.statype:
self.graphtype = graphtype
self.statype = statype
if statype == 'Size':
self.yunit['text'] = 'Bytes'
elif statype == 'Count':
self.yunit['text'] = 'Objects'
else:
raise ValueError
self.load_stats(stats)
def setscrollregion(self):
C = self.drawingarea
botx = self.botx
x1 = self.canx0
x2 = self.tmax * self.xscale + self.canx0
if 0:
x1extra = botx
x2extra = botx
if 1:
x1extra = botx / 2 + 2 #max(5, self.xscale*0.5)
x2extra = botx / 2 + 2 #max(5, self.xscale*0.5)
if 0:
x1extra = x2extra = max(5, self.xscale * 0.5)
x1 -= x1extra
x2 += x2extra
y1 = 1-self.boty
y2 = 1
if 0:
try:
_x1, _y1, _x2, _y2 = self.scrollregion
except:
pass
else:
if (abs(_x2 - x2) < x2extra / 2 and
abs(_x1 - x1) < x1extra / 2
):
return
self.scrollregion = (x1, y1, x2, y2)
C.configure(scrollregion = self.scrollregion)
self.xaxis.configure(scrollregion = (x1, 0, x2, 10))
self.xmarks.configure(scrollregion = (x1, 0, x2, 20))
self.yaxis.configure(scrollregion = (0, y1, 20, y2))
self.drawingarea.yview(MOVETO, 0.0)
def setxrange(self, xrange):
dxrange = self.xrange / float(xrange)
self.xrange = xrange
xscaleorg = self.drawingarea.canvasx(self.botx/2)
self.drawingarea.scale("a",xscaleorg, 0, dxrange, 1.0)
self.drawingarea.scale("barsep",xscaleorg, 0, dxrange, 1.0)
self.canx0 = xscaleorg + (self.canx0 - xscaleorg) * dxrange
self.xscale = self.botx / float(self.xrange)
self.setxscrollincrement(max(2, self.xscale))
self.drawxaxis()
self.setscrollregion()
def setxscrollincrement(self, dx):
return
self.drawingarea.configure(xscrollincrement=dx)
self.xaxis.configure(xscrollincrement=dx)
self.xmarks.configure(xscrollincrement=dx)
def setyrange(self, yrange):
dyrange = float(self.yrange) / yrange
self.yrange = yrange
self.var_yrange.set(yrange)
self.drawingarea.scale("a",0, 0, 1.0, dyrange)
self.drawingarea.scale("barsep",0, 0, 1.0, dyrange)
self.yscale = float(self.boty) / self.yrange
self.drawingarea.yview(MOVETO, 0.0)
self.drawyaxis()
def xscrollbar_set(self, first, last):
self.xscrollbar.set(first, last)
self.yrange_auto()
def yrange_auto(self, force=0):
if force or self.ycontrol.autovar.get():
lo = max(0,
int(0.5+(self.drawingarea.canvasx(0) - self.canx0) / self.xscale))
hi = min(len(self.ymaxs),
int(1.5+(self.drawingarea.canvasx(self.botx) - self.canx0) / self.xscale))
if lo == hi:
ymax = 1
else:
ymax = max(self.ymaxs[lo:hi])
if ymax != self.ymax:
self.ymax = ymax
self.ycontrol.fit(ymax)
class MarkerControl:
def __init__(self, master,
marker,
setcommand = lambda:0
):
self.sample = 0
self.numsamples = 0
self.setcommand = setcommand
self.marker = marker
self.name = marker.name
sf = self.frame = Frame(master, borderwidth=2,relief=GROOVE)
self.samplevar = SizeVar()
Label(sf, text='%s sample'%marker.name).grid(row = 0, column = 0)
Label(sf,
textvariable=self.samplevar,
font=('terminal', '16', 'bold'),
bg='black',fg='yellow'
).grid(row = 1, column = 0, padx=3,pady=3)
ClickButton(sf, text='-',
pady=0,padx=5,
command=lambda:self.changesample(-1)).grid(row=0,column=1, sticky=E)
ClickButton(sf, text='+',
pady=0,padx=5,
command=lambda:self.changesample(1)).grid(row=0,column=2, sticky=W)
self.trackingvar = BooleanVar()
self.trackbutton = Checkbutton(
sf, text='Track',
padx=5,
variable = self.trackingvar,
relief=RAISED,
command=self.settracking,
indicatoron=1,
)
self.trackbutton.grid(row=1,column=1,columnspan=2)
def changesample(self, d):
sample = self.sample + d
if 0 <= sample < self.numsamples:
self.setmarker(sample)
def setmarker(self, sample):
self.marker.move(sample)
self.setsample(sample)
def setnumsamples(self, num):
self.numsamples = num
if self.trackingvar.get() or self.sample >= self.numsamples:
self.setmarker(max(0, self.numsamples-1))
def setsample(self, sample):
self.sample = sample
self.samplevar.set(sample)
self.setcommand()
def settracking(self, tracking=None):
if tracking is not None:
self.trackingvar.set(tracking)
else:
tracking = self.trackingvar.get()
if tracking:
self.setmarker(max(0, self.numsamples-1))
class Window:
def __init__(self, app, frame, windowmenu=None):
self.app = app
self.frame = frame
self.windowmenu = windowmenu
self.wtitle = frame.title()
self._is_destroyed = 0
        # Binding to <Destroy> didn't work well:
        # frame.bind('<Destroy>', self.event_destroy, add='+')
        # I give up. I modify .destroy of the frame argument instead.
self.old_destroy = frame.destroy
frame.destroy = self.new_destroy
def new_destroy(self):
if self._is_destroyed:
return
self._is_destroyed = 1
self.app.del_window(self)
try:
self.old_destroy()
except TclError:
# This may happen at closing last window
# because exit destroys the root when it sees all windows were closed.
# So I ignore it.
pass
def title(self, title):
self.frame.title(title)
self.frame.iconname(title)
self.wtitle = title
self.app.chg_window(self)
def wakeup(self):
frame = self.frame
try:
if frame.wm_state() == "iconic":
frame.wm_deiconify()
frame.tkraise()
# I don't think I want .focus_set: it behaved strange in X at least.
#frame.focus_set()
except TclError:
# This can happen when the window menu was torn off.
# Simply ignore it.
pass
class WindowMenu:
def __init__(self, frame, variable):
self.button = Menubutton(frame, text='Window')
self.menu = Menu(self.button)
self.button['menu'] = self.menu
self.variable = variable
self.wmap = {}
def add_window(self, window):
self.menu.add_radiobutton(
command = window.wakeup,
label='%d %s'%(window.wid, window.wtitle),
value=window.wid,
variable=self.variable)
self.wmap[window.wid] = self.menu.index(END)
def chg_window(self, window):
self.menu.delete(self.wmap[window.wid])
self.menu.insert_radiobutton(
self.wmap[window.wid],
command = window.wakeup,
label='%d %s'%(window.wid, window.wtitle),
value=window.wid,
variable=self.variable)
def del_window(self, window):
idx = self.wmap[window.wid]
del self.wmap[window.wid]
try:
self.menu.delete(idx)
except TclError:
# This can happen if the menu was destroyed before its contents.
# Simply ignore it.
pass
for wid in self.wmap.keys():
if self.wmap[wid] > idx:
self.wmap[wid] -= 1
class ProfileApp:
def __init__(self, mod):
self.mod = mod
root = Tk()
self.root = root
root.withdraw()
self.windows = {}
self.windowmenus = {}
self.var_window = IntVar(root)
def add_window(self, window):
window.wid = max([0]+self.windows.keys())+1
self.windows[window.wid] = window
wm = getattr(window, 'windowmenu', None)
if wm:
self.windowmenus[window.wid] = wm
for w in self.windows.values():
if w is not window:
wm.add_window(w)
for wm in self.windowmenus.values():
wm.add_window(window)
self.var_window.set(window.wid)
window.frame.bind('<FocusIn>',
lambda event:self.var_window.set(window.wid), add='+')
window.frame.bind('<Deactivate>',
lambda event:self.var_window.set(0), add='+')
def add_window_frame(self, frame, windowmenu=None):
w = Window(self, frame, windowmenu)
self.add_window(w)
return w
def chg_window(self, window):
for wm in self.windowmenus.values():
wm.chg_window(window)
def del_window(self, window):
wid = window.wid
if getattr(window, 'windowmenu', None):
del self.windowmenus[wid]
del self.windows[wid]
for wm in self.windowmenus.values():
wm.del_window(window)
if not self.windows:
self.exit()
def exit(self):
try:
self.root.destroy()
except TclError:
pass
self.root.quit()
def mainloop(self):
return self.root.mainloop()
def new_profile_browser(self, filename):
return ProfileBrowser(self, filename)
class PaneDiv:
def __init__(self, master, movecommand):
self.frame = frame = Frame(master)
self.movecommand = movecommand
self.butsize = bs = 6
bc = self.butcent = bs / 2 + 3
h = 10
self.top = Canvas(
frame,
width=10,
height=h,
)
self.top.create_line(
bc,0,bc,h,fill='#808080', width=1)
self.top.create_line(
bc+1,0,bc+1,h,fill='white', width=1)
self.rsbut = Canvas(
frame,
cursor='crosshair',
width=self.butsize,
height=self.butsize,
relief=RAISED,
bd=2
)
self.bot = Canvas(
frame,
width=10,
height=300,
bd=0
)
self.top.grid(row=0,column=0, sticky=N)
self.rsbut.grid(row=1,column=0, sticky=N)
self.bot.grid(row=2,column=0, sticky=N)
self.rsbut.bind('<Button-1>',self.but_down)
self.rsbut.bind('<ButtonRelease-1>', self.but_up)
def but_down(self, event):
self.down_event = event
self.rsbut.configure(relief=SUNKEN)
def but_up(self, event):
self.rsbut.configure(relief=RAISED)
dx = event.x - self.down_event.x
self.movecommand(dx)
def setheight(self, height):
h = height - 18
self.bot['height'] = h
bc = self.butcent
self.bot.create_line(
bc,0,bc,h,fill='#808080', width=1)
self.bot.create_line(
bc+1,0,bc+1,h,fill='white', width=1)
class TableFrame:
def __init__(self, graph, master, numkindrows, samplevar):
self.graph = graph
self.mod = graph.mod
frame = self.frame = Frame(master,borderwidth=2,relief=GROOVE)
row = 0
self.marktime = StringVar()
self.totsizevar = SizeVar()
self.sampler = StringVar()
self.sampler.set('R')
if 1:
fr = Frame(frame) # For header
om = OptionMenu(fr, self.sampler, 'R', 'L', 'R-L')
om.grid(row=0,column=0,sticky=W)
Label(fr, text='Sample').grid(row=0,column=1,sticky=W)
Label(fr, textvariable=samplevar,background='black',foreground='yellow',
).grid(row=0,column=2,sticky=W, pady=3)
Label(fr, text='at').grid(row=0,column=3,sticky=W)
Label(fr, textvariable=self.marktime).grid(row = 0, column = 4, sticky=W)
Label(fr, text='Total size = ').grid(row=1,column=0,columnspan=3,sticky=W)
Label(fr, textvar=self.totsizevar).grid(row=1,column=3,columnspan=2,sticky=W)
fr.grid(row=row, column=0, sticky=W)
row += 1
orow = row
tb = Frame(frame)
row = 0
Label(tb, text="").grid(row=row, column=0)
Label(tb, text="R", ).grid(row=row, column=1, sticky=E)
Label(tb, text="%R").grid(row=row, column=2, sticky=E)
Label(tb, text="R-L", ).grid(row=row, column=3, sticky=E)
Label(tb, text="%L").grid(row=row, column=4, sticky=E)
Label(tb, text="Kind").grid(row=row, column=5, sticky=W)
row += 1
self.profrows = []
self.totrow = ProfileRow(tb, row)
self.profrows.append(self.totrow)
row += 1
for i in range(numkindrows+1):
profrow = ProfileRow(tb, row)
self.profrows.append(profrow)
row += 1
row = orow
tb.grid(row=row, column=0, sticky=W)
# for next..
row += 1
self.totresize = 0
self.kindwidth = ProfileRow.kindwidth
def resize(self, dx, dy):
dx = int(dx)
self.totresize += dx
charresize, extra = divmod(self.totresize, 7)
newwidth = ProfileRow.kindwidth + charresize
oldwidth = self.profrows[0].kind['width']
if newwidth < 10:
newwidth = 10
dx = (newwidth - oldwidth) * 7 + extra
for pr in self.profrows:
pr.kind['width'] = newwidth
pr.kindwidth = newwidth
pr.kind['padx'] = extra / 2
import textwrap
kindtext = textwrap.fill(pr.kindtext, width=pr.kindwidth)
pr.set_kind(pr.kindtext)
return dx, dy
def update(self, lsamp, rsamp):
self.marktime.set(self.mod.time.asctime(self.mod.time.localtime(rsamp.stat.timemade)))
return
for pr in self.profrows:
pr.clear()
rdiv = float(rsamp.stat.size)
ldiv = float(lsamp.stat.size)
self.totrow.set_color_size_percent_kind(
None,
rsamp.stat.size,
100.0,
rsamp.stat.size - lsamp.stat.size,
(rsamp.stat.size - lsamp.stat.size) * 100.0 / ldiv,
'<Total>'
)
for i, r in enumerate(rsamp.rows):
l = lsamp.kindrows[r.name]
self.profrows[i+1].set_color_size_percent_kind(
self.graph.getkindcolor(r.name),
r.size,
r.size * 100.0 / rdiv,
r.size - l.size,
(r.size - l.size) * 100.0 / ldiv,
r.name)
class ColSpec:
def __init__(self, tf, header, width, pos, render, idx=()):
self.tf = tf
self.header = header
self.name = header
self.width = width
self.pos = pos
self.render = render
self.idx = idx
def align(self, text):
sp = ' '*(self.width - len(text))
if self.pos == LEFT:
text = text + sp
elif self.pos == RIGHT:
text = sp[:-1] + text + ' '
else:
assert 0
assert len(text) == self.width
return text
class TableFrame:
def __init__(self, graph, master):
self.graph = graph
self.mod = graph.mod
frame = self.frame = Frame(
master,
borderwidth=3,
relief=SUNKEN
)
self.colspecs = {}
self.colwidths = []
def defcol(names, width, pos, put, idxfunc = lambda x:()):
if callable(put):
put = [put]*len(names)
self.colwidths.append(width)
for name, put in zip(names, put):
spec = ColSpec(self, name, width, pos, put, idxfunc(name))
self.colspecs[name] = spec
defcol(('A', 'B'), 2, LEFT, self.putcolor, lambda x:x)
defcol(('Size', 'Count'), 7, RIGHT, [self.putsize, self.putcount])
defcol(('%A:Tot', '%B:Tot'), 7, RIGHT, self.putpercent, lambda name:name[1])
defcol(('B-A', 'A-B', 'Cumul'), 7, RIGHT, [self.putdiff, self.putdiff, self.putcumul],
lambda name:[(),name.split('-')]['-' in name])
defcol(('%A:Tot', '%B:Tot'), 7, RIGHT, self.putpercent, lambda name:name[1])
defcol(('Kind',), 20, LEFT, self.putkind)
width = 0
for w in self.colwidths:
width += w
self.totxresize = 0
self.totyresize = 0
self.kindcol = self.colspecs['Kind']
self.orgkindwidth = self.kindcol.width
self.widthbeforekind = width - self.orgkindwidth
self.minkindwidth = 10
self.mintextheight = 2
width += 1
self.width = self.orgwidth = width
wrap = NONE
cursor = master['cursor']
relief = FLAT
self.minpadx = 3
self.tothead = Text(
frame,
width=width,
wrap=wrap,
background='#ccc',
height=2,
padx=self.minpadx,
relief=relief,
cursor=cursor,
)
self.rowhead = Text(
frame,
width=width,
wrap=wrap,
background='#ccc',
height=1,
padx=self.minpadx,
relief=relief,
cursor=cursor,
)
self.tsframe = Frame(frame)
self.textminpady = 2
self.text = Text(
self.tsframe,
width=width,
wrap=wrap,
height=21,
background='#e0e0e0',
relief=relief,
takefocus=0,
cursor=cursor,
padx=self.minpadx,
pady=self.textminpady,
)
self.scrollbar = Scrollbar(
self.tsframe,
width=10,
orient=VERTICAL,
command=self.text.yview
)
self.scrollbar_totwidth = int(self.scrollbar['width']) + 6 # width + padding
self.uses_scrollbar = 0
self.auto_scrollbar = 1
self.orgtextheight = int(self.text['height'])
padx = 0
pady = 0
self.tothead.pack(anchor=N+W, padx=padx, pady=pady)
self.rowhead.pack(anchor=N+W, padx=padx, pady=pady)
self.text.pack(side=LEFT,anchor=N+W, padx=padx, pady=pady)
self.tsframe.pack(anchor=N+W, padx=padx, pady=pady)
def setchdim(self):
self.text.update()
self.chdx = float(self.text.winfo_width()) / self.width
self.chdy = float(self.text.winfo_height()) / self.orgtextheight
self.chdx = int(round(self.chdx))
self.chdy = int(round(self.chdy))
self.pixwidth = self.width * self.chdx
self.pixheight = self.width * self.chdy
def putcolor(self, col):
if self.colorow.name == '<Total>':
text = col.align(' ')
color = '#e0e0e0'
else:
            color = self.graph.getkindcolor(self.colorow.name)
text = col.align('@')
self.text.insert('end', text, (color,))
self.text.tag_config(color,foreground=color, background='#e0e0e0',
font=('terminal', '12', 'bold'),)
def putcount(self, col):
self.valmode = 'Count'
count = self.colorow.count
self.cumulval += count
self.putval(col, count)
def putsize(self, col):
self.valmode = 'Size'
size = self.colorow.size
self.cumulval += size
self.putval(col, size)
def putval(self, col, val):
self.curval = val
self.ap(col.align(sizestring(val)))
def putpercent(self, col):
a = self.statbyname[col.idx]
if self.valmode == 'Count':
ref = a.count
elif self.valmode == 'Size':
ref = a.size
if ref:
ps = percentstring(self.curval * 100.0 / ref)
else:
ps = '---'
self.ap(col.align(ps))
def putdiff(self, col):
a, b = self.rowbyname[col.idx[0]], self.rowbyname[col.idx[1]]
if self.valmode == 'Count':
a, b = a.count, b.count
elif self.valmode == 'Size':
a, b = a.size, b.size
self.putval(col, a - b)
def putcumul(self, col):
self.putval(col, self.cumulval)
def putkind(self, col):
# Must be last!
import textwrap
wraplines = textwrap.wrap(self.colorow.name, width=col.width)
self.ap(col.align(wraplines[0]))
if len(wraplines) > 1:
initial = '\n'+' '*(self.widthbeforekind)
for line in wraplines[1:]:
self.ap(initial+col.align(line))
def setmode(self, mode, numkindrows):
self.mode = mode
self.numkindrows = numkindrows
self.mcontrols = self.graph.mcontrolbyname
self.stats = self.graph.stats
self.cols = [self.colspecs[x.strip()] for x in mode.split(' ') if x.strip()]
self.controlnames = {}
name = self.cols[0].idx
self.colorcontrol = self.mcontrols[name]
self.controlnames[name] = 1
self.controls = [self.colorcontrol]
self.lastidxs = [None]
for i, co in enumerate(self.cols):
idx = co.idx
if not isinstance(idx, (tuple, list)):
idx = (idx,)
for idx in idx:
if idx not in self.controlnames:
self.controls.append(self.mcontrols[idx])
self.controlnames[idx] = 1
self.lastidxs.append(None)
def setscrollbar(self, sb):
if sb == self.uses_scrollbar:
return
self.uses_scrollbar = sb
w = self.scrollbar_totwidth
if sb:
self.resize(-w, 0, setscrollbar=0)
self.scrollbar.pack(side=LEFT, fill=Y)
self.text['yscrollcommand'] = self.scrollbar.set
else:
self.resize(w, 0, setscrollbar=0)
self.scrollbar.pack_forget()
self.text['yscrollcommand'] = None
def update_simple(self, lsamp, rsamp):
t = self.text
t.delete('1.0', '100.0')
t.insert('1.0', str(rsamp.stat))
def update(self, force=0, setscrollbar=1):
stats = self.stats
idxs = [max(0, min(control.sample, len(stats)-1)) for control in self.controls]
if (idxs == self.lastidxs) and not force:
return
self.lastidxs = idxs
self.text['state'] = self.tothead['state'] = self.rowhead['state'] = NORMAL
self.text.delete('1.0', END)
self.tothead.delete('1.0', END)
self.rowhead.delete('1.0', END)
if not stats:
self.tothead.insert('end', '-- No Sample --')
self.text['state'] = self.tothead['state'] = self.rowhead['state'] = DISABLED
return
self.statbyname = {}
statbyidx = []
for i, control in enumerate(self.controls):
stat = stats[idxs[i]]
statbyidx.append(stat)
self.statbyname[control.name] = stat
samps = self.samps = [
Sample(self.mod, statbyidx[0], self.controls[0].marker.name, idxs[0],
numkindrows=self.numkindrows,
statype = self.graph.display.statype
)]
self.colorsamp = samps[0]
if len(self.controls) > 1:
samps.append(Sample(self.mod, statbyidx[1], self.controls[1].marker.name, idxs[1],
relative=samps[0]))
self.relsamp = samps[1]
t = self.tothead
n = max([len(str(samp.index)) for samp in samps])
for samp in samps:
t.insert('end', 'Sample %s: '%samp.name)
t.insert('end', ('%%%dd'%n)%samp.index, ('index',))
t.insert('end', ' at %s\n' % (samp.datetime))
t.tag_configure('index', background='#e0e0e0')
t = self.rowhead
self.sizes = [float(samp.stat.size) for samp in samps]
for col in self.cols:
t.insert('end', col.align(col.header), ('header',))
t.insert('end', '\n')
t = self.text
self.ap = lambda text:t.insert('end', text)
self.colorow = Row(samps[0].count, samps[0].size, '<Total>')
self.rowbyname = self.statbyname
self.cumulval = 0
for col in self.cols:
col.render(col)
self.ap('\n\n')
self.cumulval = 0
for i, a in enumerate(samps[0].rows):
self.colorow = a
if len(samps) > 1:
self.rowbyname = {
samps[0].name:a,
samps[1].name:samps[1].kindrows[a.name]
}
for col in self.cols:
col.render(col)
self.ap('\n')
if setscrollbar and self.auto_scrollbar:
numrows = int(self.text.index('end').split('.')[0])-2
h = int(self.text['height'])
needs_scrollbar = numrows > h
if needs_scrollbar != self.uses_scrollbar:
self.setscrollbar(needs_scrollbar)
self.text['state'] = self.tothead['state'] = self.rowhead['state'] = DISABLED
def resize(self, dx, dy, setscrollbar=1):
dx = int(dx)
oldwidth = self.pixwidth
newwidth = self.pixwidth + dx
if newwidth < self.chdx * 2:
newwidth = self.chdx * 2
self.pixwidth = newwidth
dx = newwidth - oldwidth
charwidth, extra = divmod(newwidth, self.chdx)
self.kindcol.width = max(charwidth - self.widthbeforekind - 1, self.minkindwidth)
self.totxresize += dx
for t in (self.tothead, self.rowhead, self.text):
t['width'] = charwidth
t['padx'] = self.minpadx + extra / 2
dy = int(dy)
rowresize, extra = divmod(self.totyresize + dy, self.chdy)
newheight = self.orgtextheight + rowresize
oldheight = int(self.text['height'])
if newheight < self.mintextheight:
newheight = self.mintextheight
dy = (newheight - oldheight) * self.chdy + extra
self.totyresize += dy
self.text['height'] = newheight
self.text['pady'] = self.textminpady + extra / 2
self.update(force=1, setscrollbar=1)
return dx, dy
class Filler:
def __init__(self, master):
self.frame = self.can = Canvas(
master,
#background='blue',
width=0,
height=0)
def getsize(self):
return int(self.can['width']),int(self.can['height']),
def setsize(self, w, h):
self.can.configure(
width = w,
height = h
)
def resize(self, dw, dh):
w, h = self.getsize()
self.setsize(max(0, w + dw), max(0, h + dh))
class Row:
def __init__(self, count, size, name):
self.count = count
self.size = size
self.name = name
class Sample:
def __init__(self, mod, stat, name, index, numkindrows=None, statype='Size', relative=None):
self.stat = stat
self.size = stat.size
self.count = stat.count
self.name = name
self.index = index
self.datetime = mod.time.asctime(mod.time.localtime(stat.timemade))
self.kindrows = {}
if numkindrows is not None:
rows = stat.get_rows_n_and_other(numkindrows, statype)
for r in rows:
self.kindrows[r.name] = r
else:
kinds = []
oidx = None
for row in relative.rows:
if row.name == '<Other>':
oidx = len(kinds)
continue
else:
kinds.append(row.name)
rows = stat.get_rows_of_kinds(kinds)
size = 0
count = 0
for i, row in enumerate(rows):
kind = kinds[i]
if row is None:
row = Row(0, 0, kind)
self.kindrows[kind] = row
size += row.size
count += row.count
if oidx is not None:
other = Row(stat.count - count, stat.size - size, '<Other>')
rows[oidx:oidx] = [other]
self.kindrows['<Other>'] = other
self.rows = rows
class ProfileBrowser:
colors = ("red", "green", "blue", "yellow", "magenta", "cyan", 'white')
numkindrows = 10
def __init__(self, app, filename):
self.inited = 0
self.app = app
self.mod = mod = app.mod
self.master = master = app.root
if filename:
filename = mod.path.abspath(filename)
self.initialdir = mod.path.dirname(filename)
else:
self.initialdir = mod.os.getcwd()
self.frame = frame = Toplevel(
master,
#background='#bbb'
)
#frame['cursor'] = 'umbrella'
#frame.resizable(True,True)
self.menubar = Frame(self.frame, relief=RAISED, bd=2)
self.filebutton = Menubutton(self.menubar, text='File')
self.filemenu = Menu(self.filebutton)
self.filebutton['menu'] = self.filemenu
self.filemenu.add_command(label='New Profile Browser', command=self.cmd_new)
self.filemenu.add_command(label='Open Profile', command=self.cmd_open)
self.filemenu.add_command(label='Close Window', command=self.cmd_close)
self.filemenu.add_command(label='Clear Cache', command=self.cmd_clear_cache)
self.filemenu.add_command(label='Exit', command=self.cmd_exit)
self.panebutton = Menubutton(self.menubar, text='Pane')
self.panemenu = Menu(self.panebutton)
self.panebutton['menu'] = self.panemenu
choices = [
('Bars', 'Lines'),
('Size', 'Count'),
]
self.graphtypevar = StringVar()
self.graphbutton = self.modechooser(
self.menubar, 'Graph', choices,
self.graphtypevar, self.cmd_graphtype)
choices = [
('A', 'B'),
('Size', 'Count'),
('%A:Tot', '%B:Tot'),
('Cumul', 'A-B', 'B-A'),
('%A:Tot', '%B:Tot'),
('Kind',),
]
self.var_tablemode=StringVar()
self.tablebutton = Menubutton(self.menubar, text='Table')
self.tablemenu = Menu(self.tablebutton)
self.tablebutton['menu'] = self.tablemenu
self.headermenu = Menu(self.tablebutton, title='Table header')
self.addmodechooser(
self.headermenu,
choices,
self.var_tablemode,
self.cmd_tablemode
)
self.tablemenu.add_cascade(label='Header',menu=self.headermenu)
self.var_tablescrollbar = StringVar()
self.tablescrollbarmenu = Menu(self.tablebutton, title = 'Table scrollbar')
self.addmodechooser(
self.tablescrollbarmenu,
[('Auto', 'On', 'Off')],
self.var_tablescrollbar,
self.cmd_tablescrollbar
)
self.tablemenu.add_cascade(
label='Scrollbar',
menu = self.tablescrollbarmenu)
self.windowmenu = WindowMenu(self.menubar, self.app.var_window)
self.window = app.add_window_frame(self.frame, self.windowmenu)
self.helpbutton = Menubutton(self.menubar, text='Help')
self.helpmenu = Menu(self.helpbutton)
self.helpbutton['menu'] = self.helpmenu
self.helpmenu.add_command(label='About', command=self.cmd_about)
self.helpmenu.add_command(label='Help', command=self.cmd_help)
self.ctrlframe = Frame(
self.frame,
bd=2,
relief=GROOVE,
#background='#999',
)
self.exitbutton = Button(self.ctrlframe, text='Exit', command=self.cmd_exit,
background='red')
self.set_filename(filename)
self.id_collect = None
self.collecting = IntVar()
self.collecting.set(0)
self.collectbutton = Checkbutton(self.ctrlframe, text='Collect',
variable = self.collecting,
command=self.cmd_collect,
relief=RAISED)
self.stats = Stats(self.mod)
self.disptab = Frame(self.frame,
#relief=SUNKEN,
#bd=3
)
self.display = Display(self.disptab,
scale_table = AxisControl.scale_table,
numkindrows = self.numkindrows,
getkindcolor = self.getkindcolor,
)
self.xcontrol = AxisControl(self.ctrlframe,
name = 'X',
range = self.display.xrange,
grid = self.display.xgrid,
unit = 'samples',
rangecommand = self.display.setxrange,
gridcommand = self.display.setxgrid
)
self.ycontrol = AxisControl(self.ctrlframe,
name = 'Y',
range = self.display.yrange,
grid = self.display.ygrid,
unit = 'bytes',
rangecommand = self.display.setyrange,
gridcommand = self.display.setygrid,
autocommand = self.display.cmd_yrange_auto
)
self.display.xcontrol = self.xcontrol
self.display.ycontrol = self.ycontrol
self.mcontrols = []
self.mcontrolbyname = {}
for name in ('A', 'B'):
marker = self.display.new_xmarker(name)
control = MarkerControl(self.ctrlframe, marker, self.update_tableframe)
marker.set_poscommand(control.setsample)
self.mcontrols.append(control)
self.mcontrolbyname[name] = control
if 0:
self.optionsmenu.add_checkbutton(
label='X grid',
variable = self.display.var_xgrid,
command = self.display.cmd_xgrid)
self.optionsmenu.add_checkbutton(
label='Y grid',
variable = self.display.var_ygrid,
command = self.display.cmd_ygrid)
self.var_showcontrol=BooleanVar()
self.var_showcontrol.set(1)
self.panemenu.add_checkbutton(
label='Show Control Panel',
variable = self.var_showcontrol,
command = self.cmd_showcontrol)
self.var_showgraph=BooleanVar()
self.var_showgraph.set(1)
self.panemenu.add_checkbutton(
label='Show Graph',
variable = self.var_showgraph,
command = self.cmd_showgraph)
self.var_showtable=BooleanVar()
self.var_showtable.set(1)
self.panemenu.add_checkbutton(
label='Show Table',
variable = self.var_showtable,
command = self.cmd_showtable)
tf = self.tf = TableFrame(self, self.disptab)
d_t = self.d_t = PaneDiv(self.disptab, movecommand=self.cmd_dt_moved)
if 0:
self.ycontrol.frame.pack(side=LEFT, padx=3,pady=3)
self.xcontrol.frame.pack(side=LEFT, padx=3,pady=3)
self.scontrol.frame.pack(side=LEFT, padx=3, pady=3)
self.graphtypeframe.pack(side=LEFT, padx=3,pady=3)
self.collectbutton.pack(side=LEFT, padx=3,pady=3)
else:
self.xcontrol.frame.grid(row=0,column=0, padx=3,pady=3, sticky=W)
self.ycontrol.frame.grid(row=1,column=0, padx=3,pady=3)
self.mcontrols[0].frame.grid(row=0,column=1, columnspan=1,sticky=W,padx=3,pady=3)
self.mcontrols[1].frame.grid(row=1,column=1, columnspan=1,sticky=W,padx=3,pady=3)
self.exitbutton.grid(row=0,column=2, padx=3,pady=3)
self.collectbutton.grid(row=0,column=3, padx=3,pady=3)
self.filler = Filler(self.frame)
if 1:
self.filebutton.pack(side=LEFT)
self.panebutton.pack(side=LEFT)
self.graphbutton.pack(side=LEFT)
self.tablebutton.pack(side=LEFT)
self.windowmenu.button.pack(side=LEFT)
self.helpbutton.pack(side=LEFT)
self.menubar.grid(column=0,columnspan=4, sticky=N+W+E)
self.gridmain()
if 0:
self.display.frame.grid(row = 0, column = 0, sticky=N+W, padx=3,pady=3)
tf.frame.grid(row=0, column=1, sticky=S+E, padx=3,pady=3)
self.ctrlframe.grid(row=1,column=0, columnspan=2, sticky=W)
frame.bind('<Map>', self.event_map)
self.tf.setmode(self.var_tablemode.get(), self.numkindrows)
self.load_filename(filename)
d_t.frame.update_idletasks()
d_t.setheight(max(self.display.frame.winfo_height(),
tf.frame.winfo_height()))
d_t.frame.update_idletasks()
self.minsize = (500,400)
self.maxsize = (self.frame.winfo_screenwidth(), self.frame.winfo_screenheight())
minsizes = {
# (ctrl, disp, tab) : (width, height)
(0,0,0): (270, 25),
(1,0,0): (363, 61),
(0,1,0): (270, 131),
(1,1,0): (270, 131),
}
self.setusergeometry()
def initfinal():
self.tf.setchdim()
rx = self.frame.winfo_rootx() + self.frame.winfo_width()
self.tf_wanted_margin = rx - (self.tf.frame.winfo_rootx() + self.tf.frame.winfo_width())
self.lastw = self.frame.winfo_width()
self.lasth = self.frame.winfo_height()
self.in_configure = 0
frame.bind('<Configure>', self.event_configure)
self.inited = 1
initfinal()
#self.frame.after_idle(initfinal)
def cmd_about(self):
self.cmd_help('about')
def cmd_help(self, pickname='help'):
os = self.mod.os
ocursor = self.frame.winfo_toplevel()['cursor']
try:
self.frame.winfo_toplevel()['cursor'] = 'watch'
self.frame.update()
m = self.mod.Text.gsltextviewer(
self.frame,
inpickle = getattr(self.mod.pbhelp, pickname)
#htmloutfile='/tmp/x.html',
)
self.app.add_window_frame(m)
finally:
self.frame.winfo_toplevel()['cursor'] = ocursor
def cmd_clear_cache(self):
self.stats.clear_cache()
def cmd_close(self):
self.frame.destroy()
def cmd_collect(self, *args):
#self.afterfunc()
#self.frame.after(1, self.afterfunc) # Turn on button first.??
if self.collecting.get():
self.event_collect()
else:
if self.id_collect is not None:
self.frame.after_cancel(self.id_collect)
self.id_collect = None
def event_collect(self):
o, n = self.stats.collect()
if n:
if o != self.display.numstats:
self.display.load_stats(self.stats)
else:
st = self.stats[-n:]
self.display.add_stats(st)
for c in self.mcontrols:
c.setnumsamples(len(self.stats))
self.id_collect = self.frame.after(1000, self.event_collect)
def cmd_dt_moved(self, dx):
# The division between display and table panes moved.
# Disable configure event handling while we are resizing.
self.in_configure += 1
# Right x position of enclosing frame
rx = self.frame.winfo_rootx() + self.frame.winfo_width()
# Right margin between pane divider and enclosing window
mx = rx - (self.d_t.frame.winfo_rootx() + self.d_t.frame.winfo_width())
# Don't move pane divider outside window
dx = min(dx, mx)
# Right margin between table and enclosing window
# before resizing
mx = rx - (self.tf.frame.winfo_rootx() + self.tf.frame.winfo_width())
dx, _ = self.display.resize(dx, 0)
wanted_margin = self.tf_wanted_margin
# After move
mx -= dx
self.tf.resize(mx - wanted_margin, 0)
self.display.moveback()
self.in_configure -= 1
def cmd_exit(self):
self.app.exit()
def cmd_graphtype(self):
self.display.setgraphtype(self.graphtypevar.get(), self.stats)
self.cmd_tablemode()
def cmd_new(self):
self.app.new_profile_browser(self.filename)
def cmd_open(self):
op = tkFileDialog.Open(self.frame,
# ? Should we have default extension or not??
# defaultextension='.hpy',
initialdir = self.initialdir,
filetypes=[('Heapy data files','.hpy'),
('All files', '*')
]
)
filename = op.show()
if filename:
self.load_filename(filename)
def cmd_showcontrol(self):
self.grid_things()
def cmd_showgraph(self):
if self.var_showgraph.get() and self.var_showtable.get():
self.tf.resize(-self.tf.totxresize, 0)
self.display.resize(self.display.orgwidth - self.display.botx, 0)
self.display.moveback()
self.grid_things()
cmd_showtable = cmd_showgraph
def cmd_tablemode(self):
self.tf.setmode(self.var_tablemode.get(), self.numkindrows)
self.tf.update()
def cmd_tablescrollbar(self):
tf = self.tf
s = self.var_tablescrollbar.get()
if s == 'Auto':
tf.auto_scrollbar = 1
tf.update(force=1, setscrollbar=1)
elif s == 'On':
tf.auto_scrollbar = 0
tf.setscrollbar(1)
elif s == 'Off':
tf.auto_scrollbar = 0
tf.setscrollbar(0)
else:
assert 0
def setusergeometry(self):
# Make the geometry of the window be user-specified
# This is called after Tk has determined the size
# of the window needed for the initial widget configuration.
# The size is not to be changed after that, other than
# on user request.
# I couldn't just do frame.geometry(frame.geometry()) because,
# presumably, of a bug in the Tk and/or wm I am using. I hope
# this works for all systems .. Notes 26 Oct 2005.
self.frame.update()
g = '%dx%d+%d+%d'%(
self.frame.winfo_width(),
self.frame.winfo_height(),
self.frame.winfo_rootx(),
self.frame.winfo_rooty())
self.frame.geometry(g)
def modechooser(self, frame, name, choices, cmdvar, command):
button = Menubutton(frame, text=name)
menu = Menu(button)
button['menu'] = menu
self.addmodechooser(menu, choices, cmdvar, command)
return button
def addmodechooser(self, menu, choices, cmdvar, command):
def setcmdvar():
cmdvar.set(' '.join([v.get() for v in vars]))
def cmd():
setcmdvar()
command()
vars = []
for ch in choices:
var = StringVar()
vars.append(var)
var.set(ch[0])
for a in ch:
menu.add_radiobutton(
command = cmd,
label = a,
value=a,
variable=var,
#font=('Courier','12', 'bold'),
#font=('Helvetica','12', 'bold'),
columnbreak = (a == ch[0])
)
setcmdvar()
def grid_things(self):
ow = self.frame.winfo_width()
oh = self.frame.winfo_height()
self.ctrlframe.grid_forget()
self.display.frame.grid_forget()
self.d_t.frame.grid_forget()
self.tf.frame.grid_forget()
self.disptab.grid_forget()
self.filler.frame.grid_forget()
self.gridmain()
self.frame.update_idletasks()
self.sizewidgets()
def gridmain(self):
row = 1
c = self.var_showcontrol.get()
if c:
self.ctrlframe.grid(row=row,column=0, columnspan=3, padx=3,pady=3,sticky=W)
row += 1
column = 0
g = self.var_showgraph.get()
t = self.var_showtable.get()
gt = (g, t)
if g:
self.display.frame.grid(row=0, column = column, sticky=N+W,
padx=3,pady=3
)
column += 1
if g and t:
self.d_t.frame.grid(row=0, column=column, sticky=N+W)
column += 1
if t:
self.tf.frame.grid(row=0, column=column, sticky=N+W
, padx=3,pady=3
)
if g or t:
self.disptab.grid(row=row, column=0,
sticky=N+W,
#padx=3,pady=3,
)
row += 1
self.filler.setsize(0, 0)
self.filler.frame.grid(row=row,column=3, sticky=N+W)
if 0 and not (g or t):
self.frame.resizable(0,0)
else:
self.frame.resizable(1,1)
def event_configure(self, event):
if event.widget is not self.frame:
return
if not self.inited:
return
if self.in_configure:
return
curw = self.frame.winfo_width()
curh = self.frame.winfo_height()
if curw == self.lastw and curh == self.lasth:
return
self.in_configure += 1
self.lastw = curw
self.lasth = curh
self.sizewidgets()
self.in_configure -= 1
def sizewidgets(self):
self.frame.update()
curw = self.frame.winfo_width()
curh = self.frame.winfo_height()
mbx = self.menubar.winfo_rootx()
mby = self.menubar.winfo_rooty()
sfs = []
if self.var_showgraph.get():
sfs.append(self.display)
if self.var_showtable.get():
sfs.append(self.tf)
if not sfs:
sfs.append(self.filler)
dys = {}
didh = 0
for sf in sfs:
f = sf.frame
diy = f.winfo_rooty()
dih = f.winfo_height()
ch = diy - mby + dih
dy = curh - ch - 7
didh = didh or dy
dys[sf] = dy
if self.var_showtable.get():
f = self.tf.frame
elif self.var_showgraph.get():
f = self.display.frame
else:
f = self.filler.frame
fx = f.winfo_rootx()
fw = f.winfo_width()
cw = fx - mbx + fw
fdw = curw - cw - 6
if f is self.filler.frame and not self.var_showcontrol.get():
fdw = curw - self.filler.getsize()[0] - 3
if didh or fdw:
if self.var_showgraph.get() and self.var_showtable.get():
dprop = float(self.display.frame.winfo_width())
dprop = dprop / (dprop + self.tf.frame.winfo_width())
dx, dy = self.display.resize(fdw * dprop, dys[self.display])
self.tf.resize(fdw - dx, dys[self.tf])
self.frame.update_idletasks()
self.d_t.setheight(max(self.display.frame.winfo_height(),
self.tf.frame.winfo_height()))
elif self.var_showgraph.get():
self.display.resize(fdw, dys[self.display])
elif self.var_showtable.get():
self.tf.resize(fdw, dys[self.tf])
else:
self.filler.resize(fdw, dys[self.filler])
self.filler.setsize(self.filler.getsize()[0],1000)
if self.var_showgraph.get():
self.display.moveback()
#self.resize(dw, dh)
def resize(self, dw, dh):
self.display.resize(dw, dh)
#self.frame.wm_geometry('')
def event_map(self, event):
self.frame.unbind('<Map>')
self.frame.bind('<Unmap>', self.event_unmap)
self.frame.lift()
def event_unmap(self, event):
self.frame.unbind('<Unmap>')
self.frame.bind('<Map>', self.event_map)
def load_filename(self, filename):
ocursor = self.frame.winfo_toplevel()['cursor']
try:
self.frame.winfo_toplevel()['cursor'] = 'watch'
self.frame.update()
if filename:
filename = self.mod.path.abspath(filename)
try:
self.stats.open(filename)
except:
etype, value, tb = self.mod._root.sys.exc_info()
tkMessageBox.showerror(
master=self.frame,
message = (
"Error when loading\n%r:\n"%filename+
"%s"%''.join(self.mod._root.traceback.format_exception_only(
etype, value)))
)
else:
self.display.load_stats(self.stats)
for c in self.mcontrols:
c.setnumsamples(len(self.stats))
#self.scontrol.trackcommand(1)
self.set_filename(filename)
self.xrange_fit()
self.display.xview_moveto(0)
self.mcontrols[1].settracking(0)
self.mcontrols[0].settracking(1)
self.yrange_fit()
self.tf.update(force=1)
if filename:
self.initialdir = self.mod.path.dirname(filename)
finally:
self.frame.winfo_toplevel()['cursor'] = ocursor
def update_tableframe(self):
self.tf.update()
def getkindcolor(self, kind):
if kind == '<Other>':
return 'black'
else:
return self.colors[abs(hash(kind))%len(self.colors)]
def set_filename(self, filename):
self.filename = filename
if not filename:
filename = '<No File>'
title = 'Heapy Profile Browser: %s'%filename
self.window.title(title)
def setnormpos(self):
self.setscrollregion()
if self.ymax >= self.yrange:
self.yrange_fit()
if self.xi0 is None:
self.drawxaxis()
else:
self.updatexaxis()
self.track()
def redraw_all(self):
pass
def trackoff(self):
self.rcontrol.settracking(0)
def xrange_fit(self):
self.xcontrol.fit(len(self.stats))
def yrange_fit(self):
self.display.yrange_auto(force=1)
class _GLUECLAMP_:
_imports_ = (
'_parent:Use',
'_parent:pbhelp',
'_root.guppy.etc:textView',
'_root.guppy:specs',
'_root:md5',
'_root:os',
'_root.os:path',
'_root:time',
'_root.guppy.gsl:Text',
)
def pb(self, filename=None):
"""pb( [filename: profilefilename+])
Create a Profile Browser window.
Argument
filename: profilefilename+
The name of a file containing profile data.
See also
Heapy Profile Browser[1]
Screenshot[2]
References
[0] heapy_Use.html#heapykinds.Use.pb
[1] ProfileBrowser.html
[2] pbscreen.jpg"""
pa = ProfileApp(self)
pa.new_profile_browser(filename)
pa.mainloop()
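    # A minimal usage sketch; hpy() is the usual guppy entry point and is
    # assumed here rather than defined in this module:
    #   >>> from guppy import hpy
    #   >>> hpy().pb('myprofile.hpy')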
def tpg(self):
self('/tmp/x.hpy')
| apache-2.0 |
jp-bpl/configuration | util/vpc-tools/tag-old-ebs.py | 62 | 7679 | """
For a given AWS account, go through all unattached EBS volumes and tag them.
"""
import boto
import boto.utils
import argparse
import logging
import subprocess
import time
import os
from os.path import join, exists, isdir, islink, realpath, basename, dirname
import yaml
# needs to be pip installed
import netaddr
LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
TIMEOUT = 300
log_level = logging.INFO
def tags_for_hostname(hostname, mapping):
logging.debug("Hostname is {}".format(hostname))
if not hostname.startswith('ip-'):
return {}
octets = hostname.lstrip('ip-').split('-')
tags = {}
# Update with env and deployment info
tags.update(mapping['CIDR_SECOND_OCTET'][octets[1]])
ip_addr = netaddr.IPAddress(".".join(octets))
for key, value in mapping['CIDR_REST'].items():
cidr = ".".join([
mapping['CIDR_FIRST_OCTET'],
octets[1],
key])
cidrset = netaddr.IPSet([cidr])
if ip_addr in cidrset:
tags.update(value)
return tags
def potential_devices(root_device):
device_dir = dirname(root_device)
relevant_devices = lambda x: x.startswith(basename(root_device))
all_devices = os.listdir(device_dir)
all_devices = filter(relevant_devices, all_devices)
logging.info("Potential devices on {}: {}".format(root_device, all_devices))
if len(all_devices) > 1:
all_devices.remove(basename(root_device))
return map(lambda x: join(device_dir, x), all_devices)
def get_tags_for_disk(mountpoint):
tag_data = {}
# Look at some files on it to determine:
# - hostname
# - environment
# - deployment
# - cluster
# - instance-id
# - date created
hostname_file = join(mountpoint, "etc", "hostname")
edx_dir = join(mountpoint, 'edx', 'app')
if exists(hostname_file):
# This means this was a root volume.
with open(hostname_file, 'r') as f:
hostname = f.readline().strip()
tag_data['hostname'] = hostname
if exists(edx_dir) and isdir(edx_dir):
            # This is an Ansible-related AMI; we'll try to map
            # the hostname to a known deployment and cluster.
cluster_tags = tags_for_hostname(hostname, mappings)
tag_data.update(cluster_tags)
else:
# Not an ansible created root volume.
tag_data['cluster'] = 'unknown'
else:
# Not a root volume
tag_data['cluster'] = "unknown"
instance_file = join(mountpoint, "var", "lib", "cloud", "instance")
if exists(instance_file) and islink(instance_file):
resolved_path = realpath(instance_file)
old_instance_id = basename(resolved_path)
tag_data['instance-id'] = old_instance_id
return tag_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag unattached ebs volumes.")
parser.add_argument("--profile", '-p',
help="AWS Profile to use with boto.")
parser.add_argument("--noop", "-n", action="store_true",
help="Don't actually tag anything.")
parser.add_argument("--verbose", "-v", action="store_true",
help="More verbose output.")
parser.add_argument("--device", "-d", default="/dev/xvdf",
help="The /dev/??? where the volume should be mounted.")
parser.add_argument("--mountpoint", "-m", default="/mnt",
help="Location to mount the new device.")
parser.add_argument("--config", "-c", required=True,
help="Configuration to map hostnames to tags.")
    # The config should specify what tags to associate with the second
    # and third octets of the hostname, which should be the ip address.
# example:
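    # A hypothetical sketch of that mapping (keys follow what
    # tags_for_hostname() expects; the octets and tag values are made up, and
    # the octet keys are quoted so they load as strings):
    #   CIDR_FIRST_OCTET: '10'
    #   CIDR_SECOND_OCTET:
    #     '1':
    #       environment: stage
    #       deployment: edx
    #   CIDR_REST:
    #     '0.0/24':
    #       cluster: edxapp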
args = parser.parse_args()
mappings = yaml.safe_load(open(args.config,'r'))
# Setup Logging
if args.verbose:
log_level = logging.DEBUG
logging.basicConfig(format=LOG_FORMAT, level=log_level)
# setup boto
ec2 = boto.connect_ec2(profile_name=args.profile)
# get mounting args
id_info = boto.utils.get_instance_identity()['document']
instance_id = id_info['instanceId']
az = id_info['availabilityZone']
root_device = args.device
mountpoint = args.mountpoint
# Find all unattached volumes
filters = { "status": "available", "availability-zone": az }
potential_volumes = ec2.get_all_volumes(filters=filters)
logging.debug("Found {} unattached volumes in {}".format(len(potential_volumes), az))
for vol in potential_volumes:
if "cluster" in vol.tags:
continue
# Attach volume to the instance running this process
logging.debug("Trying to attach {} to {} at {}".format(
vol.id, instance_id, root_device))
try:
ec2.attach_volume(vol.id, instance_id, root_device)
# Wait for the volume to finish attaching.
waiting_msg = "Waiting for {} to be available at {}"
timeout = TIMEOUT
while not exists(root_device):
time.sleep(2)
logging.debug(waiting_msg.format(vol.id, root_device))
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while attaching {}.".format(vol.id))
exit(1)
# Because a volume might have multiple mount points
devices_on_volume = potential_devices(root_device)
if len(devices_on_volume) != 1:
vol.add_tag("devices_on_volume", str(devices_on_volume))
# Don't tag in this case because the different devices
# may have conflicting tags.
logging.info("Skipping {} because it has multiple mountpoints.".format(vol.id))
logging.info("{} has mountpoints {}".format(vol.id, str(devices_on_volume)))
else:
device = devices_on_volume[0]
try:
# Mount the volume
subprocess.check_call(["sudo", "mount", device, mountpoint])
# Learn all tags we can know from content on disk.
tag_data = get_tags_for_disk(mountpoint)
tag_data['created'] = vol.create_time
                    # If any are found, tag the volume with them
if args.noop:
logging.info("Would have tagged {} with: \n{}".format(vol.id, str(tag_data)))
else:
logging.info("Tagging {} with: \n{}".format(vol.id, str(tag_data)))
vol.add_tags(tag_data)
finally:
# Un-mount the volume
subprocess.check_call(['sudo', 'umount', mountpoint])
finally:
# Need this to be a function so we always re-check the API for status.
is_attached = lambda vol_id: ec2.get_all_volumes(vol_id)[0].status != "available"
timeout = TIMEOUT
while exists(root_device) or is_attached(vol.id):
if is_attached(vol.id):
try:
# detach the volume
ec2.detach_volume(vol.id)
except boto.exception.EC2ResponseError as e:
logging.warning("Failed to detach volume. Will try again in a bit.")
time.sleep(2)
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while detaching {}.".format(vol.id))
exit(1)
logging.debug("Waiting for {} to be detached.".format(vol.id))
| agpl-3.0 |
lucidfrontier45/scikit-learn | examples/covariance/plot_covariance_estimation.py | 2 | 4991 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
The usual estimator for covariance is the maximum likelihood estimator,
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed-form formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print __doc__
import numpy as np
import pylab as pl
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# Negative log-likelihood of the data under the ground-truth model, which
# we would not have access to in real settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularization parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
pl.plot(pl.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
pl.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
pl.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
pl.ylim(ymin, ymax)
pl.xlim(xmin, xmax)
pl.legend()
pl.show()
| bsd-3-clause |
sorgerlab/bioagents | bioagents/tests/test_model_diagnoser.py | 2 | 2836 | from indra.statements import *
from bioagents.mra.model_diagnoser import ModelDiagnoser
from indra.assemblers.pysb import PysbAssembler
from nose.plugins.attrib import attr
drug = Agent('PLX4720')
raf = Agent('RAF', db_refs={'FPLX': 'RAF'})
mek = Agent('MEK', db_refs={'FPLX': 'MEK'})
erk = Agent('ERK', db_refs={'FPLX': 'ERK'})
def test_missing_activity1():
stmts = [Activation(raf, mek), Phosphorylation(mek, erk)]
md = ModelDiagnoser(stmts)
suggs = md.get_missing_activities()
assert len(suggs) == 1
assert suggs[0].enz.name == 'MEK'
assert suggs[0].enz.activity
assert suggs[0].enz.activity.activity_type == 'activity'
def test_missing_activity2():
stmts = [Inhibition(drug, raf), Activation(raf, mek)]
md = ModelDiagnoser(stmts)
suggs = md.get_missing_activities()
assert len(suggs) == 1
assert suggs[0].subj.name == 'RAF'
assert suggs[0].subj.activity
assert suggs[0].subj.activity.activity_type == 'activity'
def test_missing_activity3():
stmts = [Activation(raf, mek), Activation(raf, erk)]
md = ModelDiagnoser(stmts)
suggs = md.get_missing_activities()
assert len(suggs) == 0
def test_check_model():
explain = Activation(raf, erk)
mek_active = Agent('MEK', db_refs={'FPLX': 'MEK'},
activity=ActivityCondition('activity', True))
model_stmts = [Activation(raf, mek), Activation(mek_active, erk)]
# Build the pysb model
pa = PysbAssembler()
pa.add_statements(model_stmts)
pa.make_model(policies='one_step')
md = ModelDiagnoser(model_stmts, pa.model, explain)
result = md.check_explanation()
assert result['has_explanation'] is True
path = result['explanation_path']
assert len(path) == 2
assert path[0] == model_stmts[0]
assert path[1] == model_stmts[1]
@attr('nonpublic')
def test_propose_statement():
jun = Agent('JUN', db_refs={'HGNC':'6204', 'UP': 'P05412'})
explain = Activation(raf, jun)
erk_active = Agent('ERK', db_refs={'FPLX': 'ERK'},
activity=ActivityCondition('activity', True))
# Leave out MEK activates ERK
model_stmts = [Activation(raf, mek), Activation(erk_active, jun)]
# Build the pysb model
pa = PysbAssembler()
pa.add_statements(model_stmts)
pa.make_model(policies='one_step')
md = ModelDiagnoser(model_stmts, pa.model, explain)
result = md.check_explanation()
assert result['has_explanation'] is False
assert result.get('explanation_path') is None
inf_prop = result.get('connect_rules')
assert inf_prop == ('RAF_activates_MEK_activity',
'ERK_act_activates_JUN_activity'), inf_prop
stmt_prop = result.get('connect_stmts')
assert stmt_prop == (model_stmts[0], model_stmts[1])
stmt_suggestions = md.suggest_statements(*stmt_prop)
| bsd-2-clause |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/idlelib/CallTips.py | 43 | 7941 | """CallTips.py - An IDLE Extension to Jog Your Memory
Call Tips are floating windows which display function, class, and method
parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
import re
import sys
import types
from idlelib import CallTipWindow
from idlelib.HyperParser import HyperParser
import __main__
class CallTips:
menudefs = [
('edit', [
("Show call tip", "<<force-open-calltip>>"),
])
]
def __init__(self, editwin=None):
if editwin is None: # subprocess and test
self.editwin = None
return
self.editwin = editwin
self.text = editwin.text
self.calltip = None
self._make_calltip_window = self._make_tk_calltip_window
def close(self):
self._make_calltip_window = None
def _make_tk_calltip_window(self):
# See __init__ for usage
return CallTipWindow.CallTip(self.text)
def _remove_calltip_window(self, event=None):
if self.calltip:
self.calltip.hidetip()
self.calltip = None
def force_open_calltip_event(self, event):
"""Happens when the user really wants to open a CallTip, even if a
function call is needed.
"""
self.open_calltip(True)
def try_open_calltip_event(self, event):
"""Happens when it would be nice to open a CallTip, but not really
necessary, for example after an opening bracket, so function calls
won't be made.
"""
self.open_calltip(False)
def refresh_calltip_event(self, event):
"""If there is already a calltip window, check if it is still needed,
and if so, reload it.
"""
if self.calltip and self.calltip.is_active():
self.open_calltip(False)
def open_calltip(self, evalfuncs):
self._remove_calltip_window()
hp = HyperParser(self.editwin, "insert")
sur_paren = hp.get_surrounding_brackets('(')
if not sur_paren:
return
hp.set_index(sur_paren[0])
expression = hp.get_expression()
if not expression or (not evalfuncs and expression.find('(') != -1):
return
arg_text = self.fetch_tip(expression)
if not arg_text:
return
self.calltip = self._make_calltip_window()
self.calltip.showtip(arg_text, sur_paren[0], sur_paren[1])
def fetch_tip(self, expression):
"""Return the argument list and docstring of a function or class
If there is a Python subprocess, get the calltip there. Otherwise,
either fetch_tip() is running in the subprocess itself or it was called
in an IDLE EditorWindow before any script had been run.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
To find methods, fetch_tip must be fed a fully qualified name.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except AttributeError:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_calltip",
(expression,), {})
else:
entity = self.get_entity(expression)
return get_arg_text(entity)
def get_entity(self, expression):
"""Return the object corresponding to expression evaluated
in a namespace spanning sys.modules and __main__.__dict__.
"""
if expression:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
try:
return eval(expression, namespace)
except BaseException:
# An uncaught exception closes idle, and eval can raise any
# exception, especially if user classes are involved.
return None
def _find_constructor(class_ob):
# Given a class object, return a function object used for the
# constructor (ie, __init__() ) or None if we can't find one.
try:
return class_ob.__init__.im_func
except AttributeError:
for base in class_ob.__bases__:
rc = _find_constructor(base)
if rc is not None: return rc
return None
def get_arg_text(ob):
"""Get a string describing the arguments for the given object,
only if it is callable."""
arg_text = ""
if ob is not None and hasattr(ob, '__call__'):
arg_offset = 0
if type(ob) in (types.ClassType, types.TypeType):
# Look for the highest __init__ in the class chain.
fob = _find_constructor(ob)
if fob is None:
fob = lambda: None
else:
arg_offset = 1
elif type(ob)==types.MethodType:
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
fob = ob.im_func
arg_offset = 1
else:
fob = ob
# Try to build one for Python defined functions
if type(fob) in [types.FunctionType, types.LambdaType]:
argcount = fob.func_code.co_argcount
real_args = fob.func_code.co_varnames[arg_offset:argcount]
defaults = fob.func_defaults or []
defaults = list(map(lambda name: "=%s" % repr(name), defaults))
defaults = [""] * (len(real_args) - len(defaults)) + defaults
items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
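# co_flags bit 0x4 (CO_VARARGS) marks a *args parameter; bit 0x8
# (CO_VARKEYWORDS) marks a **kwargs parameter.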
if fob.func_code.co_flags & 0x4:
items.append("...")
if fob.func_code.co_flags & 0x8:
items.append("***")
arg_text = ", ".join(items)
arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
# See if we can use the docstring
doc = getattr(ob, "__doc__", "")
if doc:
doc = doc.lstrip()
pos = doc.find("\n")
if pos < 0 or pos > 70:
pos = 70
if arg_text:
arg_text += "\n"
arg_text += doc[:pos]
return arg_text
#################################################
#
# Test code
#
if __name__=='__main__':
def t1(): "()"
def t2(a, b=None): "(a, b=None)"
def t3(a, *args): "(a, ...)"
def t4(*args): "(...)"
def t5(a, *args): "(a, ...)"
def t6(a, b=None, *args, **kw): "(a, b=None, ..., ***)"
def t7((a, b), c, (d, e)): "(<tuple>, c, <tuple>)"
class TC(object):
"(ai=None, ...)"
def __init__(self, ai=None, *b): "(ai=None, ...)"
def t1(self): "()"
def t2(self, ai, b=None): "(ai, b=None)"
def t3(self, ai, *args): "(ai, ...)"
def t4(self, *args): "(...)"
def t5(self, ai, *args): "(ai, ...)"
def t6(self, ai, b=None, *args, **kw): "(ai, b=None, ..., ***)"
def t7(self, (ai, b), c, (d, e)): "(<tuple>, c, <tuple>)"
def test(tests):
ct = CallTips()
failed=[]
for t in tests:
expected = t.__doc__ + "\n" + t.__doc__
name = t.__name__
# exercise fetch_tip(), not just get_arg_text()
try:
qualified_name = "%s.%s" % (t.im_class.__name__, name)
except AttributeError:
qualified_name = name
arg_text = ct.fetch_tip(qualified_name)
if arg_text != expected:
failed.append(t)
fmt = "%s - expected %s, but got %s"
print fmt % (t.__name__, expected, get_arg_text(t))
print "%d of %d tests failed" % (len(failed), len(tests))
tc = TC()
tests = (t1, t2, t3, t4, t5, t6, t7,
TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6, tc.t7)
test(tests)
| apache-2.0 |
felixma/nova | nova/api/openstack/compute/schemas/fixed_ips.py | 79 | 1027 | # Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
reserve = {
'type': 'object',
'properties': {
'reserve': parameter_types.none,
},
'required': ['reserve'],
'additionalProperties': False,
}
unreserve = {
'type': 'object',
'properties': {
'unreserve': parameter_types.none,
},
'required': ['unreserve'],
'additionalProperties': False,
}
| apache-2.0 |
apache/airflow | airflow/providers/google/cloud/example_dags/example_translate_speech.py | 3 | 3196 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow import models
from airflow.providers.google.cloud.operators.text_to_speech import CloudTextToSpeechSynthesizeOperator
from airflow.providers.google.cloud.operators.translate_speech import CloudTranslateSpeechOperator
from airflow.utils import dates
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
BUCKET_NAME = os.environ.get("GCP_TRANSLATE_SPEECH_TEST_BUCKET", "INVALID BUCKET NAME")
# [START howto_operator_translate_speech_gcp_filename]
FILENAME = "gcp-speech-test-file"
# [END howto_operator_translate_speech_gcp_filename]
# [START howto_operator_text_to_speech_api_arguments]
INPUT = {"text": "Sample text for demo purposes"}
VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"}
AUDIO_CONFIG = {"audio_encoding": "LINEAR16"}
# [END howto_operator_text_to_speech_api_arguments]
# [START howto_operator_translate_speech_arguments]
CONFIG = {"encoding": "LINEAR16", "language_code": "en_US"}
AUDIO = {"uri": f"gs://{BUCKET_NAME}/{FILENAME}"}
TARGET_LANGUAGE = 'pl'
FORMAT = 'text'
MODEL = 'base'
SOURCE_LANGUAGE = None # type: None
# [END howto_operator_translate_speech_arguments]
with models.DAG(
"example_gcp_translate_speech",
schedule_interval=None, # Override to match your needs
start_date=dates.days_ago(1),
tags=['example'],
) as dag:
text_to_speech_synthesize_task = CloudTextToSpeechSynthesizeOperator(
project_id=GCP_PROJECT_ID,
input_data=INPUT,
voice=VOICE,
audio_config=AUDIO_CONFIG,
target_bucket_name=BUCKET_NAME,
target_filename=FILENAME,
task_id="text_to_speech_synthesize_task",
)
# [START howto_operator_translate_speech]
translate_speech_task = CloudTranslateSpeechOperator(
project_id=GCP_PROJECT_ID,
audio=AUDIO,
config=CONFIG,
target_language=TARGET_LANGUAGE,
format_=FORMAT,
source_language=SOURCE_LANGUAGE,
model=MODEL,
task_id='translate_speech_task',
)
translate_speech_task2 = CloudTranslateSpeechOperator(
audio=AUDIO,
config=CONFIG,
target_language=TARGET_LANGUAGE,
format_=FORMAT,
source_language=SOURCE_LANGUAGE,
model=MODEL,
task_id='translate_speech_task2',
)
# [END howto_operator_translate_speech]
text_to_speech_synthesize_task >> translate_speech_task >> translate_speech_task2
| apache-2.0 |
fusionpig/ansible | v1/tests/TestSynchronize.py | 103 | 6958 |
import unittest
import getpass
import os
import shutil
import time
import tempfile
from nose.plugins.skip import SkipTest
from ansible.runner.action_plugins.synchronize import ActionModule as Synchronize
class FakeRunner(object):
def __init__(self):
self.connection = None
self.transport = None
self.basedir = None
self.sudo = None
self.remote_user = None
self.private_key_file = None
self.check = False
self.become = False
self.become_method = 'sudo'
self.become_user = False
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None,
persist_files=False, complex_args=None, delete_remote_tmp=True):
self.executed_conn = conn
self.executed_tmp = tmp
self.executed_module_name = module_name
self.executed_args = args
self.executed_async_jid = async_jid
self.executed_async_module = async_module
self.executed_async_limit = async_limit
self.executed_inject = inject
self.executed_persist_files = persist_files
self.executed_complex_args = complex_args
self.executed_delete_remote_tmp = delete_remote_tmp
def noop_on_check(self, inject):
return self.check
class FakeConn(object):
def __init__(self):
self.host = None
self.delegate = None
class TestSynchronize(unittest.TestCase):
def test_synchronize_action_basic(self):
""" verify the synchronize action plugin sets
the delegate to 127.0.0.1 and remote path to user@host:/path """
runner = FakeRunner()
runner.remote_user = "root"
runner.transport = "ssh"
conn = FakeConn()
inject = {
'inventory_hostname': "el6.lab.net",
'inventory_hostname_short': "el6",
'ansible_connection': None,
'ansible_ssh_user': 'root',
'delegate_to': None,
'playbook_dir': '.',
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert runner.executed_complex_args == {"dest":"[email protected]:/tmp/bar", "src":"/tmp/foo"}, "wrong args used"
assert runner.sudo == None, "sudo was not reset to None"
def test_synchronize_action_sudo(self):
""" verify the synchronize action plugin unsets and then sets sudo """
runner = FakeRunner()
runner.become = True
runner.remote_user = "root"
runner.transport = "ssh"
conn = FakeConn()
inject = {
'inventory_hostname': "el6.lab.net",
'inventory_hostname_short': "el6",
'ansible_connection': None,
'ansible_ssh_user': 'root',
'delegate_to': None,
'playbook_dir': '.',
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert runner.executed_complex_args == {'dest':'[email protected]:/tmp/bar',
'src':'/tmp/foo',
'rsync_path':'"sudo rsync"'}, "wrong args used"
assert runner.become == True, "sudo was not reset to True"
def test_synchronize_action_local(self):
""" verify the synchronize action plugin sets
the delegate to 127.0.0.1 and does not alter the dest """
runner = FakeRunner()
runner.remote_user = "jtanner"
runner.transport = "paramiko"
conn = FakeConn()
conn.host = "127.0.0.1"
conn.delegate = "thishost"
inject = {
'inventory_hostname': "thishost",
'ansible_ssh_host': '127.0.0.1',
'ansible_connection': 'local',
'delegate_to': None,
'playbook_dir': '.',
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.transport == "paramiko", "runner transport was changed"
assert runner.remote_user == "jtanner", "runner remote_user was changed"
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert "dest_port" not in runner.executed_complex_args, "dest_port should not have been set"
assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
assert runner.executed_complex_args.get("dest") == "/tmp/bar", "dest was set incorrectly"
def test_synchronize_action_vagrant(self):
""" Verify the action plugin accommodates the common
scenarios for vagrant boxes. """
runner = FakeRunner()
runner.remote_user = "jtanner"
runner.transport = "ssh"
conn = FakeConn()
conn.host = "127.0.0.1"
conn.delegate = "thishost"
inject = {
'inventory_hostname': "thishost",
'ansible_ssh_user': 'vagrant',
'ansible_ssh_host': '127.0.0.1',
'ansible_ssh_port': '2222',
'delegate_to': None,
'playbook_dir': '.',
'hostvars': {
'thishost': {
'inventory_hostname': 'thishost',
'ansible_ssh_port': '2222',
'ansible_ssh_host': '127.0.0.1',
'ansible_ssh_user': 'vagrant'
}
}
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.transport == "ssh", "runner transport was changed"
assert runner.remote_user == "jtanner", "runner remote_user was changed"
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert runner.executed_inject['ansible_ssh_user'] == "vagrant", "runner user was changed"
assert runner.executed_complex_args.get("dest_port") == "2222", "remote port was not set to 2222"
assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
assert runner.executed_complex_args.get("dest") == "[email protected]:/tmp/bar", "dest was set incorrectly"
| gpl-3.0 |
danakj/chromium | tools/grit/grit/format/chrome_messages_json_unittest.py | 23 | 3612 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for chrome_messages_json.py.
"""
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import grd_reader
from grit import util
from grit.tool import build
class ChromeMessagesJsonFormatUnittest(unittest.TestCase):
def testMessages(self):
root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS_SIMPLE_MESSAGE">
Simple message.
</message>
<message name="IDS_QUOTES">
element\u2019s \u201c<ph name="NAME">%s<ex>name</ex></ph>\u201d attribute
</message>
<message name="IDS_PLACEHOLDERS">
<ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning
</message>
<message name="IDS_PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
<message name="IDS_STARTS_WITH_SPACE">
''' (<ph name="COUNT">%d<ex>2</ex></ph>)
</message>
<message name="IDS_ENDS_WITH_SPACE">
(<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_SPACE_AT_BOTH_ENDS">
''' (<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_DOUBLE_QUOTES">
A "double quoted" message.
</message>
<message name="IDS_BACKSLASH">
\\
</message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
buf)
output = buf.getvalue()
test = u"""
{
"SIMPLE_MESSAGE": {
"message": "Simple message."
},
"QUOTES": {
"message": "element\\u2019s \\u201c%s\\u201d attribute"
},
"PLACEHOLDERS": {
"message": "%1$d error, %2$d warning"
},
"PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE": {
"message": "$1$test$2$",
"placeholders": {
"1": {
"content": "$1"
},
"2": {
"content": "$2"
}
}
},
"STARTS_WITH_SPACE": {
"message": " (%d)"
},
"ENDS_WITH_SPACE": {
"message": "(%d) "
},
"SPACE_AT_BOTH_ENDS": {
"message": " (%d) "
},
"DOUBLE_QUOTES": {
"message": "A \\"double quoted\\" message."
},
"BACKSLASH": {
"message": "\\\\"
}
}
"""
self.assertEqual(test.strip(), output.strip())
def testTranslations(self):
root = util.ParseGrdForUnittest("""
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'), buf)
output = buf.getvalue()
test = u"""
{
"ID_HELLO": {
"message": "H\\u00e9P\\u00e9ll\\u00f4P\\u00f4!"
},
"ID_HELLO_USER": {
"message": "H\\u00e9P\\u00e9ll\\u00f4P\\u00f4 %s"
}
}
"""
self.assertEqual(test.strip(), output.strip())
class DummyOutput(object):
def __init__(self, type, language):
self.type = type
self.language = language
def GetType(self):
return self.type
def GetLanguage(self):
return self.language
def GetOutputFilename(self):
return 'hello.gif'
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
sadaf2605/django | django/db/models/sql/subqueries.py | 9 | 8284 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
# number of objects deleted
num_deleted = 0
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
return num_deleted
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
this is done by copying the query.query.where to self.query, for
complex queries by using subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if not innerq_used_tables or innerq_used_tables == self.tables:
# There is only the base table in use in the query.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return 0
return self.delete_batch(values, using)
else:
innerq.clear_select_clause()
innerq.select = [
pk.get_col(self.get_initial_alias())
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass, related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model is not self.get_meta().model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Append a sequence of (field, model, value) triples to the internal list
that will be used to generate the UPDATE query. Might be more usefully
called add_update_targets() to hint at the extra information here.
"""
for field, model, val in values_seq:
if hasattr(val, 'resolve_expression'):
# Resolve expressions here so that annotations are no longer needed
val = val.resolve_expression(self, allow_joins=False, for_save=True)
self.values.append((field, model, val))
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(
with_col_aliases=True,
subquery=True,
)
| bsd-3-clause |
cmeon/AndyImage | andy_image_resize.py | 1 | 1293 | #!/usr/bin/env python
import sys
from os import path, mkdir
from vipsCC import *
sizes = { 'ldpi':3, 'mdpi':4, 'hdpi':6, 'xhdpi':8, 'xxhdpi':12, 'xxxhdpi':16 }
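# The values are Android density multipliers scaled so that mdpi == 4
# (ldpi 0.75x, mdpi 1x, hdpi 1.5x, xhdpi 2x, xxhdpi 3x, xxxhdpi 4x); the
# shrink factor computed below (16 / value) leaves xxxhdpi untouched, so the
# source image is effectively treated as xxxhdpi-sized.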
if ( len(sys.argv) < 2):
print """
(H)Andy Image Resize
-----------------------------------
This program resizes images into ldpi to xxxhdpi
** It uses xxhdpi as the base image size and not hdpi like in the Android docs.
usage: andy_image_resize.py <image> [<folder>]
<image> - filename of the image file with extension.
<folder> - may be the path to resource folder of an Android app project.
"""
exit(1)
try:
fullname = sys.argv[1]
basename = path.basename(sys.argv[1])
filename, extension = tuple(path.splitext(basename))
image = VImage.VImage(fullname)
basefolder = '.'
try:
basefolder = sys.argv[2]
except IndexError, e:
print 'Printing on current folder'
for k, v in sizes.items():
red = 16.0 / v  # true division; float(16/v) would truncate under Python 2
folder = basefolder+'/'+'drawable-'+k
try:
mkdir(folder)
except OSError, e:
image.shrink(red, red).write(folder +'/'+ filename+extension)
else:
image.shrink(red, red).write(folder +'/'+ filename+extension)
except VError.VError, e:
e.perror(sys.argv[0])
| mit |
HexHive/datashield | compiler/llvm/utils/lit/lit/ShCommands.py | 87 | 2696 | class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
# For debugging / validation.
import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
if not isinstance(other, Pipeline):
return False
return ((self.commands, self.negate, self.pipe_err) ==
(other.commands, other.negate, self.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
| gpl-3.0 |
BizzCloud/PosBox | addons/marketing_campaign_crm_demo/__openerp__.py | 119 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Marketing Campaign - Demo',
'version': '1.0',
'depends': ['marketing_campaign',
'crm',
],
'author': 'OpenERP SA',
'category': 'Marketing',
'description': """
Demo data for the module marketing_campaign.
============================================
Creates demo data like leads, campaigns and segments for the module marketing_campaign.
""",
'website': 'http://www.openerp.com',
'data': [],
'demo': ['marketing_campaign_demo.xml'],
'installable': True,
'auto_install': False,
'images': ['images/campaigns.jpeg','images/email_templates.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vitaly-krugl/pika | pika/heartbeat.py | 1 | 8261 | """Handle AMQP Heartbeats"""
import logging
import pika.exceptions
from pika import frame
LOGGER = logging.getLogger(__name__)
class HeartbeatChecker(object):
"""Sends heartbeats to the broker. The provided timeout is used to
determine if the connection is stale - no received heartbeats or
other activity will close the connection. See the parameter list for more
details.
"""
_STALE_CONNECTION = "No activity or too many missed heartbeats in the last %i seconds"
def __init__(self, connection, timeout):
"""Create an object that will check for activity on the provided
connection as well as receive heartbeat frames from the broker. The
timeout parameter defines a window within which this activity must
happen. If not, the connection is considered dead and closed.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
:param pika.connection.Connection connection: Connection object
:param int timeout: Connection idle timeout. If no activity occurs on the
connection and no heartbeat frames are received during the
timeout window, the connection will be closed. The
interval used to send heartbeats is calculated from
this value by dividing it by two.
"""
if timeout < 1:
raise ValueError('timeout must be >= 1, but got %r' % (timeout,))
self._connection = connection
# Note: see the following documents:
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
# https://github.com/pika/pika/pull/1072
# https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion
# There is a certain amount of confusion around how client developers
# interpret the spec. The spec talks about 2 missed heartbeats as a
# *timeout*, plus that any activity on the connection counts for a
# heartbeat. This is to avoid edge cases and not to depend on network
# latency.
self._timeout = timeout
self._send_interval = float(timeout) / 2
# Note: Pika will calculate the heartbeat / connectivity check interval
# by adding 5 seconds to the negotiated timeout to leave a bit of room
# for broker heartbeats that may be right at the edge of the timeout
# window. This is different behavior from the RabbitMQ Java client and
# the spec that suggests a check interval equivalent to two times the
# heartbeat timeout value. But, one advantage of adding a small amount
# is that bad connections will be detected faster.
# https://github.com/pika/pika/pull/1072#issuecomment-397850795
# https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
# https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
self._check_interval = timeout + 5
LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
self._timeout,
self._send_interval,
self._check_interval)
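# Worked example: with the common broker default timeout of 60 seconds,
# heartbeat frames are sent every 30 seconds and connection activity is
# checked every 65 seconds.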
# Initialize counters
self._bytes_received = 0
self._bytes_sent = 0
self._heartbeat_frames_received = 0
self._heartbeat_frames_sent = 0
self._idle_byte_intervals = 0
self._send_timer = None
self._check_timer = None
self._start_send_timer()
self._start_check_timer()
@property
def bytes_received_on_connection(self):
"""Return the number of bytes received by the connection bytes object.
:rtype int
"""
return self._connection.bytes_received
@property
def connection_is_idle(self):
"""Returns true if the byte count hasn't changed in enough intervals
to trip the max idle threshold.
"""
return self._idle_byte_intervals > 0
def received(self):
"""Called when a heartbeat is received"""
LOGGER.debug('Received heartbeat frame')
self._heartbeat_frames_received += 1
def _send_heartbeat(self):
"""Invoked by a timer to send a heartbeat when we need to.
"""
LOGGER.debug('Sending heartbeat frame')
self._send_heartbeat_frame()
self._start_send_timer()
def _check_heartbeat(self):
"""Invoked by a timer to check for broker heartbeats. Checks to see
if we've missed any heartbeats and disconnect our connection if it's
been idle too long.
"""
if self._has_received_data:
self._idle_byte_intervals = 0
else:
# Connection has not received any data, increment the counter
self._idle_byte_intervals += 1
LOGGER.debug('Received %i heartbeat frames, sent %i, '
'idle intervals %i',
self._heartbeat_frames_received,
self._heartbeat_frames_sent,
self._idle_byte_intervals)
if self.connection_is_idle:
self._close_connection()
return
self._start_check_timer()
def stop(self):
"""Stop the heartbeat checker"""
if self._send_timer:
LOGGER.debug('Removing timer for next heartbeat send interval')
self._connection._adapter_remove_timeout(self._send_timer) # pylint: disable=W0212
self._send_timer = None
if self._check_timer:
LOGGER.debug('Removing timer for next heartbeat check interval')
self._connection._adapter_remove_timeout(self._check_timer) # pylint: disable=W0212
self._check_timer = None
def _close_connection(self):
"""Close the connection with the AMQP Connection-Forced value."""
LOGGER.info('Connection is idle, %i stale byte intervals',
self._idle_byte_intervals)
text = HeartbeatChecker._STALE_CONNECTION % self._timeout
# Abort the stream connection. There is no point trying to gracefully
# close the AMQP connection since lack of heartbeat suggests that the
# stream is dead.
self._connection._terminate_stream( # pylint: disable=W0212
pika.exceptions.AMQPHeartbeatTimeout(text))
@property
def _has_received_data(self):
"""Returns True if the connection has received data.
:rtype: bool
"""
return self._bytes_received != self.bytes_received_on_connection
@staticmethod
def _new_heartbeat_frame():
"""Return a new heartbeat frame.
:rtype pika.frame.Heartbeat
"""
return frame.Heartbeat()
def _send_heartbeat_frame(self):
"""Send a heartbeat frame on the connection.
"""
LOGGER.debug('Sending heartbeat frame')
self._connection._send_frame( # pylint: disable=W0212
self._new_heartbeat_frame())
self._heartbeat_frames_sent += 1
def _start_send_timer(self):
"""Start a new heartbeat send timer."""
self._send_timer = self._connection._adapter_add_timeout( # pylint: disable=W0212
self._send_interval,
self._send_heartbeat)
def _start_check_timer(self):
"""Start a new heartbeat check timer."""
# Note: update counters now to get current values
# at the start of the timeout window. Values will be
# checked against the connection's byte count at the
# end of the window
self._update_counters()
self._check_timer = self._connection._adapter_add_timeout( # pylint: disable=W0212
self._check_interval,
self._check_heartbeat)
def _update_counters(self):
"""Update the internal counters for bytes sent and received and the
number of frames received
"""
self._bytes_sent = self._connection.bytes_sent
self._bytes_received = self._connection.bytes_received
| bsd-3-clause |