Dataset schema (each record below lists repo_name, path, copies, size, content, and license, in that order):

  repo_name   string, 5-100 characters
  path        string, 4-375 characters
  copies      string, 991 distinct values
  size        string, 4-7 characters
  content     string, 666 characters to 1M
  license     string, 15 distinct values
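Read as a code dataset with the six columns above, the records can be consumed programmatically. The sketch below is illustrative only: it assumes the data is published as a Hugging Face-style dataset, and the identifier "example/source-files" is a placeholder rather than a real dataset name.

    # Minimal sketch, assuming a Hugging Face-style dataset exposing the
    # columns listed above. "example/source-files" is a hypothetical name.
    from datasets import load_dataset

    ds = load_dataset("example/source-files", split="train", streaming=True)

    for record in ds:
        # Metadata columns accompany the raw source text in "content".
        print(record["repo_name"], record["path"], record["license"], record["size"])
        source = record["content"]  # the full file as a single string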
Kazade/NeHe-Website
google_appengine/lib/protorpc/protorpc/webapp/service_handlers.py
2
28703
#!/usr/bin/env python # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Handlers for remote services. This module contains classes that may be used to build a service on top of the App Engine Webapp framework. The services request handler can be configured to handle requests in a number of different request formats. All different request formats must have a way to map the request to the service handlers defined request message.Message class. The handler can also send a response in any format that can be mapped from the response message.Message class. Participants in an RPC: There are four classes involved with the life cycle of an RPC. Service factory: A user-defined service factory that is responsible for instantiating an RPC service. The methods intended for use as RPC methods must be decorated by the 'remote' decorator. RPCMapper: Responsible for determining whether or not a specific request matches a particular RPC format and translating between the actual request/response and the underlying message types. A single instance of an RPCMapper sub-class is required per service configuration. Each mapper must be usable across multiple requests. ServiceHandler: A webapp.RequestHandler sub-class that responds to the webapp framework. It mediates between the RPCMapper and service implementation class during a request. As determined by the Webapp framework, a new ServiceHandler instance is created to handle each user request. A handler is never used to handle more than one request. ServiceHandlerFactory: A class that is responsible for creating new, properly configured ServiceHandler instance for each request. The factory is configured by providing it with a set of RPCMapper instances. When the Webapp framework invokes the service handler, the handler creates a new service class instance. The service class instance is provided with a reference to the handler. A single instance of an RPCMapper sub-class is required to configure each service. Each mapper instance must be usable across multiple requests. RPC mappers: RPC mappers translate between a single HTTP based RPC protocol and the underlying service implementation. Each RPC mapper must configured with the following information to determine if it is an appropriate mapper for a given request: http_methods: Set of HTTP methods supported by handler. content_types: Set of supported content types. default_content_type: Default content type for handler responses. Built-in mapper implementations: URLEncodedRPCMapper: Matches requests that are compatible with post forms with the 'application/x-www-form-urlencoded' content-type (this content type is the default if none is specified. It translates post parameters into request parameters. ProtobufRPCMapper: Matches requests that are compatible with post forms with the 'application/x-google-protobuf' content-type. It reads the contents of a binary post request. Public Exceptions: Error: Base class for service handler errors. 
ServiceConfigurationError: Raised when a service not correctly configured. RequestError: Raised by RPC mappers when there is an error in its request or request format. ResponseError: Raised by RPC mappers when there is an error in its response. """ __author__ = '[email protected] (Rafe Kaplan)' import array import cgi import itertools import logging import re import sys import traceback import urllib import weakref from google.appengine.ext import webapp from google.appengine.ext.webapp import util as webapp_util from protorpc import messages from protorpc import protobuf from protorpc import protojson from protorpc import protourlencode from protorpc import registry from protorpc import remote from protorpc import util from protorpc.webapp import forms __all__ = [ 'Error', 'RequestError', 'ResponseError', 'ServiceConfigurationError', 'DEFAULT_REGISTRY_PATH', 'ProtobufRPCMapper', 'RPCMapper', 'ServiceHandler', 'ServiceHandlerFactory', 'URLEncodedRPCMapper', 'JSONRPCMapper', 'service_mapping', 'run_services', ] class Error(Exception): """Base class for all errors in service handlers module.""" class ServiceConfigurationError(Error): """When service configuration is incorrect.""" class RequestError(Error): """Error occurred when building request.""" class ResponseError(Error): """Error occurred when building response.""" _URLENCODED_CONTENT_TYPE = protourlencode.CONTENT_TYPE _PROTOBUF_CONTENT_TYPE = protobuf.CONTENT_TYPE _JSON_CONTENT_TYPE = protojson.CONTENT_TYPE _EXTRA_JSON_CONTENT_TYPES = ['application/x-javascript', 'text/javascript', 'text/x-javascript', 'text/x-json', 'text/json', ] # The whole method pattern is an optional regex. It contains a single # group used for mapping to the query parameter. This is passed to the # parameters of 'get' and 'post' on the ServiceHandler. _METHOD_PATTERN = r'(?:\.([^?]*))?' DEFAULT_REGISTRY_PATH = forms.DEFAULT_REGISTRY_PATH class RPCMapper(object): """Interface to mediate between request and service object. Request mappers are implemented to support various types of RPC protocols. It is responsible for identifying whether a given request matches a particular protocol, resolve the remote method to invoke and mediate between the request and appropriate protocol messages for the remote method. """ @util.positional(4) def __init__(self, http_methods, default_content_type, protocol, content_types=None): """Constructor. Args: http_methods: Set of HTTP methods supported by mapper. default_content_type: Default content type supported by mapper. protocol: The protocol implementation. Must implement encode_message and decode_message. content_types: Set of additionally supported content types. """ self.__http_methods = frozenset(http_methods) self.__default_content_type = default_content_type self.__protocol = protocol if content_types is None: content_types = [] self.__content_types = frozenset([self.__default_content_type] + content_types) @property def http_methods(self): return self.__http_methods @property def default_content_type(self): return self.__default_content_type @property def content_types(self): return self.__content_types def build_request(self, handler, request_type): """Build request message based on request. Each request mapper implementation is responsible for converting a request to an appropriate message instance. Args: handler: RequestHandler instance that is servicing request. Must be initialized with request object and been previously determined to matching the protocol of the RPCMapper. request_type: Message type to build. 
Returns: Instance of request_type populated by protocol buffer in request body. Raises: RequestError if the mapper implementation is not able to correctly convert the request to the appropriate message. """ try: return self.__protocol.decode_message(request_type, handler.request.body) except (messages.ValidationError, messages.DecodeError), err: raise RequestError('Unable to parse request content: %s' % err) def build_response(self, handler, response): """Build response based on service object response message. Each request mapper implementation is responsible for converting a response message to an appropriate handler response. Args: handler: RequestHandler instance that is servicing request. Must be initialized with request object and been previously determined to matching the protocol of the RPCMapper. response: Response message as returned from the service object. Raises: ResponseError if the mapper implementation is not able to correctly convert the message to an appropriate response. """ try: encoded_message = self.__protocol.encode_message(response) except messages.ValidationError, err: raise ResponseError('Unable to encode message: %s' % err) else: handler.response.headers['Content-Type'] = self.default_content_type handler.response.out.write(encoded_message) class ServiceHandlerFactory(object): """Factory class used for instantiating new service handlers. Normally a handler class is passed directly to the webapp framework so that it can be simply instantiated to handle a single request. The service handler, however, must be configured with additional information so that it knows how to instantiate a service object. This class acts the same as a normal RequestHandler class by overriding the __call__ method to correctly configures a ServiceHandler instance with a new service object. The factory must also provide a set of RPCMapper instances which examine a request to determine what protocol is being used and mediates between the request and the service object. The mapping of a service handler must have a single group indicating the part of the URL path that maps to the request method. This group must exist but can be optional for the request (the group may be followed by '?' in the regular expression matching the request). Usage: stock_factory = ServiceHandlerFactory(StockService) ... configure stock_factory by adding RPCMapper instances ... application = webapp.WSGIApplication( [stock_factory.mapping('/stocks')]) Default usage: application = webapp.WSGIApplication( [ServiceHandlerFactory.default(StockService).mapping('/stocks')]) """ def __init__(self, service_factory): """Constructor. Args: service_factory: Service factory to instantiate and provide to service handler. """ self.__service_factory = service_factory self.__request_mappers = [] def all_request_mappers(self): """Get all request mappers. Returns: Iterator of all request mappers used by this service factory. """ return iter(self.__request_mappers) def add_request_mapper(self, mapper): """Add request mapper to end of request mapper list.""" self.__request_mappers.append(mapper) def __call__(self): """Construct a new service handler instance.""" return ServiceHandler(self, self.__service_factory()) @property def service_factory(self): """Service factory associated with this factory.""" return self.__service_factory @staticmethod def __check_path(path): """Check a path parameter. Make sure a provided path parameter is compatible with the webapp URL mapping. Args: path: Path to check. 
This is a plain path, not a regular expression. Raises: ValueError if path does not start with /, path ends with /. """ if path.endswith('/'): raise ValueError('Path %s must not end with /.' % path) def mapping(self, path): """Convenience method to map service to application. Args: path: Path to map service to. It must be a simple path with a leading / and no trailing /. Returns: Mapping from service URL to service handler factory. """ self.__check_path(path) service_url_pattern = r'(%s)%s' % (path, _METHOD_PATTERN) return service_url_pattern, self @classmethod def default(cls, service_factory, parameter_prefix=''): """Convenience method to map default factory configuration to application. Creates a standardized default service factory configuration that pre-maps the URL encoded protocol handler to the factory. Args: service_factory: Service factory to instantiate and provide to service handler. method_parameter: The name of the form parameter used to determine the method to invoke used by the URLEncodedRPCMapper. If None, no parameter is used and the mapper will only match against the form path-name. Defaults to 'method'. parameter_prefix: If provided, all the parameters in the form are expected to begin with that prefix by the URLEncodedRPCMapper. Returns: Mapping from service URL to service handler factory. """ factory = cls(service_factory) factory.add_request_mapper(ProtobufRPCMapper()) factory.add_request_mapper(JSONRPCMapper()) return factory class ServiceHandler(webapp.RequestHandler): """Web handler for RPC service. Overridden methods: get: All requests handled by 'handle' method. HTTP method stored in attribute. Takes remote_method parameter as derived from the URL mapping. post: All requests handled by 'handle' method. HTTP method stored in attribute. Takes remote_method parameter as derived from the URL mapping. redirect: Not implemented for this service handler. New methods: handle: Handle request for both GET and POST. Attributes (in addition to attributes in RequestHandler): service: Service instance associated with request being handled. method: Method of request. Used by RPCMapper to determine match. remote_method: Sub-path as provided to the 'get' and 'post' methods. """ def __init__(self, factory, service): """Constructor. Args: factory: Instance of ServiceFactory used for constructing new service instances used for handling requests. service: Service instance used for handling RPC. """ self.__factory = factory self.__service = service @property def service(self): return self.__service def __show_info(self, service_path, remote_method): self.response.headers['content-type'] = 'text/plain; charset=utf-8' if remote_method: self.response.out.write('%s.%s is a ProtoRPC method.\n\n' %( service_path, remote_method)) else: self.response.out.write('%s is a ProtoRPC service.\n\n' % service_path) definition_name_function = getattr(self.__service, 'definition_name', None) if definition_name_function: definition_name = definition_name_function() else: definition_name = '%s.%s' % (self.__service.__module__, self.__service.__class__.__name__) self.response.out.write('Service %s\n\n' % definition_name) self.response.out.write('More about ProtoRPC: ' 'http://code.google.com/p/google-protorpc\n') def get(self, service_path, remote_method): """Handler method for GET requests. Args: service_path: Service path derived from request URL. remote_method: Sub-path after service path has been matched. 
""" if remote_method: self.handle('GET', service_path, remote_method) else: self.response.headers['x-content-type-options'] = 'nosniff' self.error(405) if self.response.status in (405, 415) or not self.__get_content_type(): self.__show_info(service_path, remote_method) def post(self, service_path, remote_method): """Handler method for POST requests. Args: service_path: Service path derived from request URL. remote_method: Sub-path after service path has been matched. """ self.handle('POST', service_path, remote_method) def redirect(self, uri, permanent=False): """Not supported for services.""" raise NotImplementedError('Services do not currently support redirection.') def __send_error(self, http_code, status_state, error_message, mapper, error_name=None): status = remote.RpcStatus(state=status_state, error_message=error_message, error_name=error_name) encoded_status = mapper.build_response(self, status) self.response.headers['content-type'] = mapper.default_content_type logging.error(error_message) self.response.set_status(http_code, error_message) def __send_simple_error(self, code, message): """Send error to caller without embedded message.""" self.response.headers['content-type'] = 'text/plain; charset=utf-8' logging.error(message) self.response.set_status(code, message) def __get_content_type(self): content_type = self.request.headers.get('content-type', None) if not content_type: content_type = self.request.environ.get('HTTP_CONTENT_TYPE', None) if not content_type: return None # Lop off parameters from the end (for example content-encoding) return content_type.split(';', 1)[0].lower() def __headers(self, content_type): for name in self.request.headers: name = name.lower() if name == 'content-type': value = content_type elif name == 'content-length': value = str(len(self.request.body)) else: value = self.request.headers.get(name, '') yield name, value def handle(self, http_method, service_path, remote_method): """Handle a service request. The handle method will handle either a GET or POST response. It is up to the individual mappers from the handler factory to determine which request methods they can service. If the protocol is not recognized, the request does not provide a correct request for that protocol or the service object does not support the requested RPC method, will return error code 400 in the response. Args: http_method: HTTP method of request. service_path: Service path derived from request URL. remote_method: Sub-path after service path has been matched. """ self.response.headers['x-content-type-options'] = 'nosniff' content_type = self.__get_content_type() # Provide server state to the service. If the service object does not have # an "initialize_request_state" method, will not attempt to assign state. try: state_initializer = self.service.initialize_request_state except AttributeError: pass else: server_port = self.request.environ.get('SERVER_PORT', None) if server_port: server_port = int(server_port) request_state = remote.HttpRequestState( remote_host=self.request.environ.get('REMOTE_HOST', None), remote_address=self.request.environ.get('REMOTE_ADDR', None), server_host=self.request.environ.get('SERVER_HOST', None), server_port=server_port, http_method=http_method, service_path=service_path, headers=list(self.__headers(content_type))) state_initializer(request_state) if not content_type: self.__send_simple_error(400, 'Invalid RPC request: missing content-type') return # Search for mapper to mediate request. 
for mapper in self.__factory.all_request_mappers(): if content_type in mapper.content_types: break else: self.__send_simple_error(415, 'Unsupported content-type: %s' % content_type) return try: if http_method not in mapper.http_methods: self.__send_simple_error(405, 'Unsupported HTTP method: %s' % http_method) return try: try: method = getattr(self.service, remote_method) method_info = method.remote except AttributeError, err: self.__send_error( 400, remote.RpcState.METHOD_NOT_FOUND_ERROR, 'Unrecognized RPC method: %s' % remote_method, mapper) return request = mapper.build_request(self, method_info.request_type) except (RequestError, messages.DecodeError), err: self.__send_error(400, remote.RpcState.REQUEST_ERROR, 'Error parsing ProtoRPC request (%s)' % err, mapper) return try: response = method(request) except remote.ApplicationError, err: self.__send_error(400, remote.RpcState.APPLICATION_ERROR, err.message, mapper, err.error_name) return mapper.build_response(self, response) except Exception, err: logging.error('An unexpected error occured when handling RPC: %s', err, exc_info=1) self.__send_error(500, remote.RpcState.SERVER_ERROR, 'Internal Server Error', mapper) return # TODO(rafek): Support tag-id only forms. class URLEncodedRPCMapper(RPCMapper): """Request mapper for application/x-www-form-urlencoded forms. This mapper is useful for building forms that can invoke RPC. Many services are also configured to work using URL encoded request information because of its perceived ease of programming and debugging. The mapper must be provided with at least method_parameter or remote_method_pattern so that it is possible to determine how to determine the requests RPC method. If both are provided, the service will respond to both method request types, however, only one may be present in a given request. If both types are detected, the request will not match. """ def __init__(self, parameter_prefix=''): """Constructor. Args: parameter_prefix: If provided, all the parameters in the form are expected to begin with that prefix. """ # Private attributes: # __parameter_prefix: parameter prefix as provided by constructor # parameter. super(URLEncodedRPCMapper, self).__init__(['POST'], _URLENCODED_CONTENT_TYPE, self) self.__parameter_prefix = parameter_prefix def encode_message(self, message): """Encode a message using parameter prefix. Args: message: Message to URL Encode. Returns: URL encoded message. """ return protourlencode.encode_message(message, prefix=self.__parameter_prefix) @property def parameter_prefix(self): """Prefix all form parameters are expected to begin with.""" return self.__parameter_prefix def build_request(self, handler, request_type): """Build request from URL encoded HTTP request. Constructs message from names of URL encoded parameters. If this service handler has a parameter prefix, parameters must begin with it or are ignored. Args: handler: RequestHandler instance that is servicing request. request_type: Message type to build. Returns: Instance of request_type populated by protocol buffer in request parameters. Raises: RequestError if message type contains nested message field or repeated message field. Will raise RequestError if there are any repeated parameters. 
""" request = request_type() builder = protourlencode.URLEncodedRequestBuilder( request, prefix=self.__parameter_prefix) for argument in sorted(handler.request.arguments()): values = handler.request.get_all(argument) try: builder.add_parameter(argument, values) except messages.DecodeError, err: raise RequestError(str(err)) return request class ProtobufRPCMapper(RPCMapper): """Request mapper for application/x-protobuf service requests. This mapper will parse protocol buffer from a POST body and return the request as a protocol buffer. """ def __init__(self): super(ProtobufRPCMapper, self).__init__(['POST'], _PROTOBUF_CONTENT_TYPE, protobuf) class JSONRPCMapper(RPCMapper): """Request mapper for application/x-protobuf service requests. This mapper will parse protocol buffer from a POST body and return the request as a protocol buffer. """ def __init__(self): super(JSONRPCMapper, self).__init__( ['POST'], _JSON_CONTENT_TYPE, protojson, content_types=_EXTRA_JSON_CONTENT_TYPES) def service_mapping(services, registry_path=DEFAULT_REGISTRY_PATH): """Create a services mapping for use with webapp. Creates basic default configuration and registration for ProtoRPC services. Each service listed in the service mapping has a standard service handler factory created for it. The list of mappings can either be an explicit path to service mapping or just services. If mappings are just services, they will automatically be mapped to their default name. For exampel: package = 'my_package' class MyService(remote.Service): ... server_mapping([('/my_path', MyService), # Maps to /my_path MyService, # Maps to /my_package/MyService ]) Specifying a service mapping: Normally services are mapped to URL paths by specifying a tuple (path, service): path: The path the service resides on. service: The service class or service factory for creating new instances of the service. For more information about service factories, please see remote.Service.new_factory. If no tuple is provided, and therefore no path specified, a default path is calculated by using the fully qualified service name using a URL path separator for each of its components instead of a '.'. Args: services: Can be service type, service factory or string definition name of service being mapped or list of tuples (path, service): path: Path on server to map service to. service: Service type, service factory or string definition name of service being mapped. Can also be a dict. If so, the keys are treated as the path and values as the service. registry_path: Path to give to registry service. Use None to disable registry service. Returns: List of tuples defining a mapping of request handlers compatible with a webapp application. Raises: ServiceConfigurationError when duplicate paths are provided. 
""" if isinstance(services, dict): services = services.iteritems() mapping = [] registry_map = {} if registry_path is not None: registry_service = registry.RegistryService.new_factory(registry_map) services = list(services) + [(registry_path, registry_service)] mapping.append((registry_path + r'/form(?:/)?', forms.FormsHandler.new_factory(registry_path))) mapping.append((registry_path + r'/form/(.+)', forms.ResourceHandler)) paths = set() for service_item in services: infer_path = not isinstance(service_item, (list, tuple)) if infer_path: service = service_item else: service = service_item[1] service_class = getattr(service, 'service_class', service) if infer_path: path = '/' + service_class.definition_name().replace('.', '/') else: path = service_item[0] if path in paths: raise ServiceConfigurationError( 'Path %r is already defined in service mapping' % path.encode('utf-8')) else: paths.add(path) # Create service mapping for webapp. new_mapping = ServiceHandlerFactory.default(service).mapping(path) mapping.append(new_mapping) # Update registry with service class. registry_map[path] = service_class return mapping def run_services(services, registry_path=DEFAULT_REGISTRY_PATH): """Handle CGI request using service mapping. Args: Same as service_mapping. """ mappings = service_mapping(services, registry_path=registry_path) application = webapp.WSGIApplication(mappings) webapp_util.run_wsgi_app(application)
bsd-3-clause
dvliman/jaikuengine
.google_appengine/lib/django-1.4/tests/regressiontests/localflavor/se/tests.py
33
6441
# -*- coding: utf-8 -*- from django.contrib.localflavor.se.forms import (SECountySelect, SEOrganisationNumberField, SEPersonalIdentityNumberField, SEPostalCodeField) import datetime from django.test import SimpleTestCase class SELocalFlavorTests(SimpleTestCase): def setUp(self): # Mocking datetime.date to make sure # localflavor.se.utils.validate_id_birthday works class MockDate(datetime.date): def today(cls): return datetime.date(2008, 5, 14) today = classmethod(today) self._olddate = datetime.date datetime.date = MockDate def tearDown(self): datetime.date = self._olddate def test_SECountySelect(self): f = SECountySelect() out = u'''<select name="swedish_county"> <option value="AB">Stockholm</option> <option value="AC">V\xe4sterbotten</option> <option value="BD">Norrbotten</option> <option value="C">Uppsala</option> <option value="D">S\xf6dermanland</option> <option value="E" selected="selected">\xd6sterg\xf6tland</option> <option value="F">J\xf6nk\xf6ping</option> <option value="G">Kronoberg</option> <option value="H">Kalmar</option> <option value="I">Gotland</option> <option value="K">Blekinge</option> <option value="M">Sk\xe5ne</option> <option value="N">Halland</option> <option value="O">V\xe4stra G\xf6taland</option> <option value="S">V\xe4rmland</option> <option value="T">\xd6rebro</option> <option value="U">V\xe4stmanland</option> <option value="W">Dalarna</option> <option value="X">G\xe4vleborg</option> <option value="Y">V\xe4sternorrland</option> <option value="Z">J\xe4mtland</option> </select>''' self.assertHTMLEqual(f.render('swedish_county', 'E'), out) def test_SEOrganizationNumberField(self): error_invalid = [u'Enter a valid Swedish organisation number.'] valid = { '870512-1989': '198705121989', '19870512-1989': '198705121989', '870512-2128': '198705122128', '081015-6315': '190810156315', '081015+6315': '180810156315', '0810156315': '190810156315', # Test some different organisation numbers # IKEA Linköping '556074-7569': '5560747569', # Volvo Personvagnar '556074-3089': '5560743089', # LJS (organisation) '822001-5476': '8220015476', # LJS (organisation) '8220015476': '8220015476', # Katedralskolan Linköping (school) '2120000449': '2120000449', # Faux organisation number, which tests that the checksum can be 0 '232518-5060': '2325185060', } invalid = { # Ordinary personal identity numbers for sole proprietors # The same rules as for SEPersonalIdentityField applies here '081015 6315': error_invalid, '950231-4496': error_invalid, '6914104499': error_invalid, '950d314496': error_invalid, 'invalid!!!': error_invalid, '870514-1111': error_invalid, # Co-ordination number checking # Co-ordination numbers are not valid organisation numbers '870574-1315': error_invalid, '870573-1311': error_invalid, # Volvo Personvagnar, bad format '556074+3089': error_invalid, # Invalid checksum '2120000441': error_invalid, # Valid checksum but invalid organisation type '1120000441': error_invalid, } self.assertFieldOutput(SEOrganisationNumberField, valid, invalid) def test_SEPersonalIdentityNumberField(self): error_invalid = [u'Enter a valid Swedish personal identity number.'] error_coord = [u'Co-ordination numbers are not allowed.'] valid = { '870512-1989': '198705121989', '870512-2128': '198705122128', '19870512-1989': '198705121989', '198705121989': '198705121989', '081015-6315': '190810156315', '0810156315': '190810156315', # This is a "special-case" in the checksum calculation, # where the sum is divisible by 10 (the checksum digit == 0) '8705141060': '198705141060', # + means that the person 
is older than 100 years '081015+6315': '180810156315', # Co-ordination number checking '870574-1315': '198705741315', '870574+1315': '188705741315', '198705741315': '198705741315', } invalid = { '081015 6315': error_invalid, '950d314496': error_invalid, 'invalid!!!': error_invalid, # Invalid dates # February 31st does not exist '950231-4496': error_invalid, # Month 14 does not exist '6914104499': error_invalid, # There are no Swedish personal id numbers where year < 1800 '17430309-7135': error_invalid, # Invalid checksum '870514-1111': error_invalid, # Co-ordination number with bad checksum '870573-1311': error_invalid, } self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid) valid = {} invalid = { # Check valid co-ordination numbers that should not be accepted # because of coordination_number=False '870574-1315': error_coord, '870574+1315': error_coord, '8705741315': error_coord, # Invalid co-ordination numbers should be treated as invalid, and not # as co-ordination numbers '870573-1311': error_invalid, } kwargs = {'coordination_number': False,} self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid, field_kwargs=kwargs) def test_SEPostalCodeField(self): error_format = [u'Enter a Swedish postal code in the format XXXXX.'] valid = { '589 37': '58937', '58937': '58937', } invalid = { 'abcasfassadf': error_format, # Only one space is allowed for separation '589 37': error_format, # The postal code must not start with 0 '01234': error_format, } self.assertFieldOutput(SEPostalCodeField, valid, invalid)
apache-2.0
scripni/rethinkdb
test/rql_test/connections/http_support/flask/debughelpers.py
777
3508
# -*- coding: utf-8 -*-
"""
    flask.debughelpers
    ~~~~~~~~~~~~~~~~~~

    Various helpers to make the development experience better.

    :copyright: (c) 2011 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
from ._compat import implements_to_string


class UnexpectedUnicodeError(AssertionError, UnicodeError):
    """Raised in places where we want some better error reporting for
    unexpected unicode or binary data.
    """


@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
    """Raised from request.files during debugging. The idea is that it can
    provide a better error message than just a generic KeyError/BadRequest.
    """

    def __init__(self, request, key):
        form_matches = request.form.getlist(key)
        buf = ['You tried to access the file "%s" in the request.files '
               'dictionary but it does not exist. The mimetype for the request '
               'is "%s" instead of "multipart/form-data" which means that no '
               'file contents were transmitted. To fix this error you should '
               'provide enctype="multipart/form-data" in your form.' %
               (key, request.mimetype)]
        if form_matches:
            buf.append('\n\nThe browser instead transmitted some file names. '
                       'This was submitted: %s' % ', '.join('"%s"' % x
                                                            for x in form_matches))
        self.msg = ''.join(buf)

    def __str__(self):
        return self.msg


class FormDataRoutingRedirect(AssertionError):
    """This exception is raised by Flask in debug mode if it detects a
    redirect caused by the routing system when the request method is not
    GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
    """

    def __init__(self, request):
        exc = request.routing_exception
        buf = ['A request was sent to this URL (%s) but a redirect was '
               'issued automatically by the routing system to "%s".'
               % (request.url, exc.new_url)]

        # In case just a slash was appended we can be extra helpful
        if request.base_url + '/' == exc.new_url.split('?')[0]:
            buf.append(' The URL was defined with a trailing slash so '
                       'Flask will automatically redirect to the URL '
                       'with the trailing slash if it was accessed '
                       'without one.')

        buf.append(' Make sure to directly send your %s-request to this URL '
                   'since we can\'t make browsers or HTTP clients redirect '
                   'with form data reliably or without user interaction.' %
                   request.method)
        buf.append('\n\nNote: this exception is only raised in debug mode')
        AssertionError.__init__(self, ''.join(buf).encode('utf-8'))


def attach_enctype_error_multidict(request):
    """Since Flask 0.8 we're monkeypatching the files object in case a
    request is detected that does not use multipart form data but the files
    object is accessed.
    """
    oldcls = request.files.__class__

    class newcls(oldcls):
        def __getitem__(self, key):
            try:
                return oldcls.__getitem__(self, key)
            except KeyError as e:
                if key not in request.form:
                    raise
                raise DebugFilesKeyError(request, key)

    newcls.__name__ = oldcls.__name__
    newcls.__module__ = oldcls.__module__
    request.files.__class__ = newcls
agpl-3.0
odicraig/kodi2odi
addons/plugin.program.super.favourites/selector.py
2
2421
#
#       Copyright (C) 2014-2015
#       Sean Poyser ([email protected])
#
#  This Program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2, or (at your option)
#  any later version.
#
#  This Program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with XBMC; see the file COPYING.  If not, write to
#  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#  http://www.gnu.org/copyleft/gpl.html
#

import sys  # required by the __main__ block below, which reads sys.argv
import inspect

FILENAME = inspect.getfile(inspect.currentframe())


def _select(index):
    import utils
    #utils.DialogOK(str(index))

    if index < 0:
        return

    import xbmc
    import utils

    view = 0
    count = 10
    while view < 1 and count > 0:
        count -= 1
        view = utils.getViewType()
        xbmc.sleep(50)

    if view < 1:
        return

    import xbmcgui

    win = None
    count = 10
    while not win and count > 0:
        count -= 1
        try:
            win = xbmcgui.Window(utils.getCurrentWindowId())
        except:
            xbmc.sleep(50)

    if not win:
        return

    list = None
    count = 10
    while not list and count > 0:
        try:
            list = win.getControl(view)
        except:
            xbmc.sleep(50)

    if not list:
        return

    xbmc.sleep(50)

    try:
        nItem = int(xbmcgui.Window(10000).getProperty('SF_NMR_ITEMS'))
        if index >= nItem:
            index = nItem - 1
    except:
        pass

    list.selectItem(index)


def select(index):
    import utils
    import xbmc
    import os

    HOME = utils.HOME

    name = 'select'
    script = FILENAME
    args = '%d' % index

    cmd = 'AlarmClock(%s,RunScript(%s,%s),%d,True)' % (name, script, args, 0)

    xbmc.executebuiltin('CancelAlarm(%s,True)' % name)
    xbmc.executebuiltin(cmd)

    utils.log(cmd, True)


if __name__ == '__main__':
    if FILENAME.endswith(sys.argv[0]):
        try:
            _select(int(sys.argv[1]))
        except:
            pass
gpl-3.0
kumajaya/android_kernel_samsung_universal5422
tools/perf/tests/attr.py
3174
9441
#! /usr/bin/python import os import sys import glob import optparse import tempfile import logging import shutil import ConfigParser class Fail(Exception): def __init__(self, test, msg): self.msg = msg self.test = test def getMsg(self): return '\'%s\' - %s' % (self.test.path, self.msg) class Unsup(Exception): def __init__(self, test): self.test = test def getMsg(self): return '\'%s\'' % self.test.path class Event(dict): terms = [ 'cpu', 'flags', 'type', 'size', 'config', 'sample_period', 'sample_type', 'read_format', 'disabled', 'inherit', 'pinned', 'exclusive', 'exclude_user', 'exclude_kernel', 'exclude_hv', 'exclude_idle', 'mmap', 'comm', 'freq', 'inherit_stat', 'enable_on_exec', 'task', 'watermark', 'precise_ip', 'mmap_data', 'sample_id_all', 'exclude_host', 'exclude_guest', 'exclude_callchain_kernel', 'exclude_callchain_user', 'wakeup_events', 'bp_type', 'config1', 'config2', 'branch_sample_type', 'sample_regs_user', 'sample_stack_user', ] def add(self, data): for key, val in data: log.debug(" %s = %s" % (key, val)) self[key] = val def __init__(self, name, data, base): log.debug(" Event %s" % name); self.name = name; self.group = '' self.add(base) self.add(data) def compare_data(self, a, b): # Allow multiple values in assignment separated by '|' a_list = a.split('|') b_list = b.split('|') for a_item in a_list: for b_item in b_list: if (a_item == b_item): return True elif (a_item == '*') or (b_item == '*'): return True return False def equal(self, other): for t in Event.terms: log.debug(" [%s] %s %s" % (t, self[t], other[t])); if not self.has_key(t) or not other.has_key(t): return False if not self.compare_data(self[t], other[t]): return False return True def diff(self, other): for t in Event.terms: if not self.has_key(t) or not other.has_key(t): continue if not self.compare_data(self[t], other[t]): log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) # Test file description needs to have following sections: # [config] # - just single instance in file # - needs to specify: # 'command' - perf command name # 'args' - special command arguments # 'ret' - expected command return value (0 by default) # # [eventX:base] # - one or multiple instances in file # - expected values assignments class Test(object): def __init__(self, path, options): parser = ConfigParser.SafeConfigParser() parser.read(path) log.warning("running '%s'" % path) self.path = path self.test_dir = options.test_dir self.perf = options.perf self.command = parser.get('config', 'command') self.args = parser.get('config', 'args') try: self.ret = parser.get('config', 'ret') except: self.ret = 0 self.expect = {} self.result = {} log.debug(" loading expected events"); self.load_events(path, self.expect) def is_event(self, name): if name.find("event") == -1: return False else: return True def load_events(self, path, events): parser_event = ConfigParser.SafeConfigParser() parser_event.read(path) # The event record section header contains 'event' word, # optionaly followed by ':' allowing to load 'parent # event' first as a base for section in filter(self.is_event, parser_event.sections()): parser_items = parser_event.items(section); base_items = {} # Read parent event if there's any if (':' in section): base = section[section.index(':') + 1:] parser_base = ConfigParser.SafeConfigParser() parser_base.read(self.test_dir + '/' + base) base_items = parser_base.items('event') e = Event(section, parser_items, base_items) events[section] = e def run_cmd(self, tempdir): cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir, 
self.perf, self.command, tempdir, self.args) ret = os.WEXITSTATUS(os.system(cmd)) log.info(" '%s' ret %d " % (cmd, ret)) if ret != int(self.ret): raise Unsup(self) def compare(self, expect, result): match = {} log.debug(" compare"); # For each expected event find all matching # events in result. Fail if there's not any. for exp_name, exp_event in expect.items(): exp_list = [] log.debug(" matching [%s]" % exp_name) for res_name, res_event in result.items(): log.debug(" to [%s]" % res_name) if (exp_event.equal(res_event)): exp_list.append(res_name) log.debug(" ->OK") else: log.debug(" ->FAIL"); log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) # we did not any matching event - fail if (not exp_list): exp_event.diff(res_event) raise Fail(self, 'match failure'); match[exp_name] = exp_list # For each defined group in the expected events # check we match the same group in the result. for exp_name, exp_event in expect.items(): group = exp_event.group if (group == ''): continue for res_name in match[exp_name]: res_group = result[res_name].group if res_group not in match[group]: raise Fail(self, 'group failure') log.debug(" group: [%s] matches group leader %s" % (exp_name, str(match[group]))) log.debug(" matched") def resolve_groups(self, events): for name, event in events.items(): group_fd = event['group_fd']; if group_fd == '-1': continue; for iname, ievent in events.items(): if (ievent['fd'] == group_fd): event.group = iname log.debug('[%s] has group leader [%s]' % (name, iname)) break; def run(self): tempdir = tempfile.mkdtemp(); try: # run the test script self.run_cmd(tempdir); # load events expectation for the test log.debug(" loading result events"); for f in glob.glob(tempdir + '/event*'): self.load_events(f, self.result); # resolve group_fd to event names self.resolve_groups(self.expect); self.resolve_groups(self.result); # do the expectation - results matching - both ways self.compare(self.expect, self.result) self.compare(self.result, self.expect) finally: # cleanup shutil.rmtree(tempdir) def run_tests(options): for f in glob.glob(options.test_dir + '/' + options.test): try: Test(f, options).run() except Unsup, obj: log.warning("unsupp %s" % obj.getMsg()) def setup_log(verbose): global log level = logging.CRITICAL if verbose == 1: level = logging.WARNING if verbose == 2: level = logging.INFO if verbose >= 3: level = logging.DEBUG log = logging.getLogger('test') log.setLevel(level) ch = logging.StreamHandler() ch.setLevel(level) formatter = logging.Formatter('%(message)s') ch.setFormatter(formatter) log.addHandler(ch) USAGE = '''%s [OPTIONS] -d dir # tests dir -p path # perf binary -t test # single test -v # verbose level ''' % sys.argv[0] def main(): parser = optparse.OptionParser(usage=USAGE) parser.add_option("-t", "--test", action="store", type="string", dest="test") parser.add_option("-d", "--test-dir", action="store", type="string", dest="test_dir") parser.add_option("-p", "--perf", action="store", type="string", dest="perf") parser.add_option("-v", "--verbose", action="count", dest="verbose") options, args = parser.parse_args() if args: parser.error('FAILED wrong arguments %s' % ' '.join(args)) return -1 setup_log(options.verbose) if not options.test_dir: print 'FAILED no -d option specified' sys.exit(-1) if not options.test: options.test = 'test*' try: run_tests(options) except Fail, obj: print "FAILED %s" % obj.getMsg(); sys.exit(-1) sys.exit(0) if __name__ == '__main__': main()
gpl-2.0
jelugbo/hebs_master
common/djangoapps/student/tests/test_create_account.py
15
5962
"Tests for account creation" import ddt import unittest from django.contrib.auth.models import User from django.test.client import RequestFactory from django.conf import settings from django.core.urlresolvers import reverse from django.contrib.auth.models import AnonymousUser from django.utils.importlib import import_module from django.test import TestCase, TransactionTestCase import mock from user_api.models import UserPreference from lang_pref import LANGUAGE_KEY from edxmako.tests import mako_middleware_process_request from external_auth.models import ExternalAuthMap import student TEST_CS_URL = 'https://comments.service.test:123/' @ddt.ddt class TestCreateAccount(TestCase): "Tests for account creation" def setUp(self): self.username = "test_user" self.url = reverse("create_account") self.request_factory = RequestFactory() self.params = { "username": self.username, "email": "[email protected]", "password": "testpass", "name": "Test User", "honor_code": "true", "terms_of_service": "true", } @ddt.data("en", "eo") def test_default_lang_pref_saved(self, lang): with mock.patch("django.conf.settings.LANGUAGE_CODE", lang): response = self.client.post(self.url, self.params) self.assertEqual(response.status_code, 200) user = User.objects.get(username=self.username) self.assertEqual(UserPreference.get_preference(user, LANGUAGE_KEY), lang) @ddt.data("en", "eo") def test_header_lang_pref_saved(self, lang): response = self.client.post(self.url, self.params, HTTP_ACCEPT_LANGUAGE=lang) self.assertEqual(response.status_code, 200) user = User.objects.get(username=self.username) self.assertEqual(UserPreference.get_preference(user, LANGUAGE_KEY), lang) def base_extauth_bypass_sending_activation_email(self, bypass_activation_email_for_extauth_setting): """ Tests user creation without sending activation email when doing external auth """ request = self.request_factory.post(self.url, self.params) # now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session. 
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session extauth = ExternalAuthMap(external_id='[email protected]', external_email='[email protected]', internal_password=self.params['password'], external_domain='shib:https://idp.stanford.edu/') request.session['ExternalAuthMap'] = extauth request.user = AnonymousUser() mako_middleware_process_request(request) with mock.patch('django.contrib.auth.models.User.email_user') as mock_send_mail: student.views.create_account(request) # check that send_mail is called if bypass_activation_email_for_extauth_setting: self.assertFalse(mock_send_mail.called) else: self.assertTrue(mock_send_mail.called) @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set") @mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'AUTOMATIC_AUTH_FOR_TESTING': False}) def test_extauth_bypass_sending_activation_email_with_bypass(self): """ Tests user creation without sending activation email when settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=True and doing external auth """ self.base_extauth_bypass_sending_activation_email(True) @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set") @mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': False, 'AUTOMATIC_AUTH_FOR_TESTING': False}) def test_extauth_bypass_sending_activation_email_without_bypass(self): """ Tests user creation without sending activation email when settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=False and doing external auth """ self.base_extauth_bypass_sending_activation_email(False) @mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) @mock.patch("lms.lib.comment_client.User.base_url", TEST_CS_URL) @mock.patch("lms.lib.comment_client.utils.requests.request", return_value=mock.Mock(status_code=200, text='{}')) class TestCreateCommentsServiceUser(TransactionTestCase): def setUp(self): self.username = "test_user" self.url = reverse("create_account") self.params = { "username": self.username, "email": "[email protected]", "password": "testpass", "name": "Test User", "honor_code": "true", "terms_of_service": "true", } def test_cs_user_created(self, request): "If user account creation succeeds, we should create a comments service user" response = self.client.post(self.url, self.params) self.assertEqual(response.status_code, 200) self.assertTrue(request.called) args, kwargs = request.call_args self.assertEqual(args[0], 'put') self.assertTrue(args[1].startswith(TEST_CS_URL)) self.assertEqual(kwargs['data']['username'], self.params['username']) @mock.patch("student.models.Registration.register", side_effect=Exception) def test_cs_user_not_created(self, register, request): "If user account creation fails, we should not create a comments service user" try: response = self.client.post(self.url, self.params) except: pass with self.assertRaises(User.DoesNotExist): User.objects.get(username=self.username) self.assertTrue(register.called) self.assertFalse(request.called)
agpl-3.0
Iconoclasteinc/tgit
test/test_announcer.py
1
1124
import pytest
from flexmock import flexmock

from tgit.announcer import Announcer

pytestmark = pytest.mark.unit


class Listener(object):
    def event_occurred(self, event):
        pass


@pytest.fixture
def announcer():
    return Announcer()


@pytest.fixture
def event():
    return "event"


def test_announces_to_all_subscribed_listeners(announcer, event):
    _listeners_are_subscribed(announcer, event)

    announcer.event_occurred(event)


def test_stops_announcing_to_unregistered_listeners(announcer, event):
    should_not_notified = flexmock(Listener())
    announcer.addListener(should_not_notified)
    _listeners_are_subscribed(announcer, event)
    announcer.removeListener(should_not_notified)
    should_not_notified.should_receive("event_occurred").never()

    announcer.event_occurred(event)


def _listeners_are_subscribed(announcer, event):
    for i in range(5):
        _subscribe_listener(announcer, event)


def _subscribe_listener(announcer, event):
    listener = flexmock(Listener())
    listener.should_receive("event_occurred").with_args(event).once()
    announcer.addListener(listener)
gpl-3.0
kamyu104/django
tests/auth_tests/test_decorators.py
279
4124
from django.conf import settings from django.contrib.auth import models from django.contrib.auth.decorators import login_required, permission_required from django.core.exceptions import PermissionDenied from django.http import HttpResponse from django.test import TestCase, override_settings from django.test.client import RequestFactory from .test_views import AuthViewsTestCase @override_settings(ROOT_URLCONF='auth_tests.urls') class LoginRequiredTestCase(AuthViewsTestCase): """ Tests the login_required decorators """ def testCallable(self): """ Check that login_required is assignable to callable objects. """ class CallableView(object): def __call__(self, *args, **kwargs): pass login_required(CallableView()) def testView(self): """ Check that login_required is assignable to normal views. """ def normal_view(request): pass login_required(normal_view) def testLoginRequired(self, view_url='/login_required/', login_url=None): """ Check that login_required works on a simple view wrapped in a login_required decorator. """ if login_url is None: login_url = settings.LOGIN_URL response = self.client.get(view_url) self.assertEqual(response.status_code, 302) self.assertIn(login_url, response.url) self.login() response = self.client.get(view_url) self.assertEqual(response.status_code, 200) def testLoginRequiredNextUrl(self): """ Check that login_required works on a simple view wrapped in a login_required decorator with a login_url set. """ self.testLoginRequired(view_url='/login_required_login_url/', login_url='/somewhere/') class PermissionsRequiredDecoratorTest(TestCase): """ Tests for the permission_required decorator """ def setUp(self): self.user = models.User.objects.create(username='joe', password='qwerty') self.factory = RequestFactory() # Add permissions auth.add_customuser and auth.change_customuser perms = models.Permission.objects.filter(codename__in=('add_customuser', 'change_customuser')) self.user.user_permissions.add(*perms) def test_many_permissions_pass(self): @permission_required(['auth.add_customuser', 'auth.change_customuser']) def a_view(request): return HttpResponse() request = self.factory.get('/rand') request.user = self.user resp = a_view(request) self.assertEqual(resp.status_code, 200) def test_many_permissions_in_set_pass(self): @permission_required({'auth.add_customuser', 'auth.change_customuser'}) def a_view(request): return HttpResponse() request = self.factory.get('/rand') request.user = self.user resp = a_view(request) self.assertEqual(resp.status_code, 200) def test_single_permission_pass(self): @permission_required('auth.add_customuser') def a_view(request): return HttpResponse() request = self.factory.get('/rand') request.user = self.user resp = a_view(request) self.assertEqual(resp.status_code, 200) def test_permissioned_denied_redirect(self): @permission_required(['auth.add_customuser', 'auth.change_customuser', 'non-existent-permission']) def a_view(request): return HttpResponse() request = self.factory.get('/rand') request.user = self.user resp = a_view(request) self.assertEqual(resp.status_code, 302) def test_permissioned_denied_exception_raised(self): @permission_required([ 'auth.add_customuser', 'auth.change_customuser', 'non-existent-permission' ], raise_exception=True) def a_view(request): return HttpResponse() request = self.factory.get('/rand') request.user = self.user self.assertRaises(PermissionDenied, a_view, request)
bsd-3-clause
nirmeshk/oh-mainline
vendor/packages/requests/requests/compat.py
1039
1469
# -*- coding: utf-8 -*-

"""
pythoncompat
"""

from .packages import chardet

import sys

# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

try:
    import simplejson as json
except (ImportError, SyntaxError):
    # simplejson does not support Python 3.2, it throws a SyntaxError
    # because of u'...' Unicode literals.
    import json

# ---------
# Specifics
# ---------

if is_py2:
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)

elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
agpl-3.0
renyi533/tensorflow
tensorflow/python/data/util/options.py
22
4914
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for tf.data options.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function def _internal_attr_name(name): return "_" + name class OptionsBase(object): """Base class for representing a set of tf.data options. Attributes: _options: Stores the option values. """ def __init__(self): # NOTE: Cannot use `self._options` here as we override `__setattr__` object.__setattr__(self, "_options", {}) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented for name in set(self._options) | set(other._options): # pylint: disable=protected-access if getattr(self, name) != getattr(other, name): return False return True def __ne__(self, other): if isinstance(other, self.__class__): return not self.__eq__(other) else: return NotImplemented def __setattr__(self, name, value): if hasattr(self, name): object.__setattr__(self, name, value) else: raise AttributeError( "Cannot set the property %s on %s." % (name, type(self).__name__)) def create_option(name, ty, docstring, default_factory=lambda: None): """Creates a type-checked property. Args: name: The name to use. ty: The type to use. The type of the property will be validated when it is set. docstring: The docstring to use. default_factory: A callable that takes no arguments and returns a default value to use if not set. Returns: A type-checked property. """ def get_fn(option): # pylint: disable=protected-access if name not in option._options: option._options[name] = default_factory() return option._options.get(name) def set_fn(option, value): if not isinstance(value, ty): raise TypeError("Property \"%s\" must be of type %s, got: %r (type: %r)" % (name, ty, value, type(value))) option._options[name] = value # pylint: disable=protected-access return property(get_fn, set_fn, None, docstring) def merge_options(*options_list): """Merges the given options, returning the result as a new options object. The input arguments are expected to have a matching type that derives from `OptionsBase` (and thus each represent a set of options). The method outputs an object of the same type created by merging the sets of options represented by the input arguments. The sets of options can be merged as long as there does not exist an option with different non-default values. If an option is an instance of `OptionsBase` itself, then this method is applied recursively to the set of options represented by this option. Args: *options_list: options to merge Raises: TypeError: if the input arguments are incompatible or not derived from `OptionsBase` ValueError: if the given options cannot be merged Returns: A new options object which is the result of merging the given options. 
""" if len(options_list) < 1: raise ValueError("At least one options should be provided") result_type = type(options_list[0]) for options in options_list: if not isinstance(options, result_type): raise TypeError("Incompatible options type: %r vs %r" % (type(options), result_type)) if not isinstance(options_list[0], OptionsBase): raise TypeError("The inputs should inherit from `OptionsBase`") default_options = result_type() result = result_type() for options in options_list: # Iterate over all set options and merge the into the result. for name in options._options: # pylint: disable=protected-access this = getattr(result, name) that = getattr(options, name) default = getattr(default_options, name) if that == default: continue elif this == default: setattr(result, name, that) elif isinstance(this, OptionsBase): setattr(result, name, merge_options(this, that)) elif this != that: raise ValueError( "Cannot merge incompatible values (%r and %r) of option: %s" % (this, that, name)) return result
apache-2.0
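create_option and merge_options above implement a small type-checked options pattern: each option is a property that validates assignments, and merging succeeds as long as no option carries two different non-default values. A short sketch of how a subclass might use them (the _FetchOptions class is hypothetical; in TensorFlow the helpers are importable from the module path shown above):

from tensorflow.python.data.util.options import (
    OptionsBase, create_option, merge_options)


class _FetchOptions(OptionsBase):
    # Hypothetical options set, defined only for illustration.
    num_parallel_calls = create_option(
        name="num_parallel_calls", ty=int,
        docstring="How many fetches may run concurrently.",
        default_factory=lambda: 1)
    deterministic = create_option(
        name="deterministic", ty=bool,
        docstring="Whether results must preserve input order.",
        default_factory=lambda: True)


a = _FetchOptions()
a.num_parallel_calls = 8      # validated by the property's set_fn

b = _FetchOptions()
b.deterministic = False

merged = merge_options(a, b)  # non-default values from both inputs survive
print(merged.num_parallel_calls, merged.deterministic)   # 8 False

# Assigning a non-int (a.num_parallel_calls = "8") raises TypeError, and merging
# two objects that set the same option to different non-default values raises ValueError.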
ksmaheshkumar/grr
parsers/ie_history.py
8
5599
#!/usr/bin/env python # Copyright 2011 Google Inc. All Rights Reserved. """Parser for IE index.dat files. Note that this is a very naive and incomplete implementation and should be replaced with a more intelligent one. Do not implement anything based on this code, it is a placeholder for something real. For anyone who wants a useful reference, see this: http://heanet.dl.sourceforge.net/project/libmsiecf/Documentation/MSIE%20Cache%20 File%20format/MSIE%20Cache%20File%20%28index.dat%29%20format.pdf """ import datetime import glob import operator import os import struct import sys import urlparse import logging from grr.lib import parsers from grr.lib.rdfvalues import webhistory # Difference between 1 Jan 1601 and 1 Jan 1970. WIN_UNIX_DIFF_MSECS = 11644473600 * 1e6 class IEHistoryParser(parsers.FileParser): """Parse IE index.dat files into BrowserHistoryItem objects.""" output_types = ["BrowserHistoryItem"] supported_artifacts = ["InternetExplorerHistory"] def Parse(self, stat, file_object, knowledge_base): """Parse the History file.""" _, _ = stat, knowledge_base # TODO(user): Convert this to use the far more intelligent plaso parser. ie = IEParser(file_object) for dat in ie.Parse(): yield webhistory.BrowserHistoryItem( url=dat["url"], domain=urlparse.urlparse(dat["url"]).netloc, access_time=dat.get("mtime"), program_name="Internet Explorer", source_urn=stat.aff4path) class IEParser(object): """Parser object for index.dat files. The file format for IE index.dat files is somewhat poorly documented. The following implementation is based on information from: http://www.forensicswiki.org/wiki/Internet_Explorer_History_File_Format Returns results in chronological order based on mtime """ FILE_HEADER = "Client UrlCache MMF Ver 5.2" BLOCK_SIZE = 0x80 def __init__(self, input_obj): """Initialize. Args: input_obj: A file like object to read the index.dat from. """ self._file = input_obj self._entries = [] def Parse(self): """Parse the file.""" if not self._file: logging.error("Couldn't open file") return # Limit read size to 5MB. self.input_dat = self._file.read(1024 * 1024 * 5) if not self.input_dat.startswith(self.FILE_HEADER): logging.error("Invalid index.dat file %s", self._file) return # Events aren't time ordered in the history file, so we collect them all # then sort. events = [] for event in self._DoParse(): events.append(event) for event in sorted(events, key=operator.itemgetter("mtime")): yield event def _GetRecord(self, offset, record_size): """Retrieve a single record from the file. Args: offset: offset from start of input_dat where header starts record_size: length of the header according to file (untrusted) Returns: A dict containing a single browser history record. 
""" record_header = "<4sLQQL" get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0] url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0] if url_offset in [0xFF, 0xFE]: return None data_offset = get4(offset + 68) data_size = get4(offset + 72) start_pos = offset + data_offset data = struct.unpack("{0}s".format(data_size), self.input_dat[start_pos:start_pos + data_size])[0] fmt = record_header unknown_size = url_offset - struct.calcsize(fmt) fmt += "{0}s".format(unknown_size) fmt += "{0}s".format(record_size - struct.calcsize(fmt)) dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size]) header, blocks, mtime, ctime, ftime, _, url = dat url = url.split(chr(0x00))[0] if mtime: mtime = mtime/10 - WIN_UNIX_DIFF_MSECS if ctime: ctime = ctime/10 - WIN_UNIX_DIFF_MSECS return {"header": header, # the header "blocks": blocks, # number of blocks "urloffset": url_offset, # offset of URL in file "data_offset": data_offset, # offset for start of data "data_size": data_size, # size of data "data": data, # actual data "mtime": mtime, # modified time "ctime": ctime, # created time "ftime": ftime, # file time "url": url # the url visited } def _DoParse(self): """Parse a file for history records yielding dicts. Yields: Dicts containing browser history """ get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0] filesize = get4(0x1c) offset = get4(0x20) coffset = offset while coffset < filesize: etype = struct.unpack("4s", self.input_dat[coffset:coffset + 4])[0] if etype == "REDR": pass elif etype in ["URL "]: # Found a valid record reclen = get4(coffset + 4) * self.BLOCK_SIZE yield self._GetRecord(coffset, reclen) coffset += self.BLOCK_SIZE def main(argv): if len(argv) < 2: print "Usage: {0} index.dat".format(os.path.basename(argv[0])) else: files_to_process = [] for input_glob in argv[1:]: files_to_process += glob.glob(input_glob) for input_file in files_to_process: ie = IEParser(open(input_file)) for dat in ie.Parse(): dat["ctime"] = datetime.datetime.utcfromtimestamp(dat["ctime"] / 1e6) print "{ctime} {header} {url}".format(**dat) if __name__ == "__main__": main(sys.argv)
apache-2.0
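The timestamps in index.dat records are Windows FILETIME values, i.e. 100-nanosecond ticks since 1601-01-01, which is why _GetRecord divides by 10 (ticks to microseconds) and subtracts WIN_UNIX_DIFF_MSECS (the microseconds between 1601 and 1970). A small worked example of that conversion; the helper name is ours, not part of the parser:

import datetime

# Microseconds between 1601-01-01 and 1970-01-01, the same constant the parser uses.
WIN_UNIX_DIFF_MSECS = 11644473600 * 1e6


def filetime_to_datetime(filetime):
    """Convert a Windows FILETIME (100 ns ticks since 1601) to a UTC datetime."""
    unix_usecs = filetime / 10 - WIN_UNIX_DIFF_MSECS   # ticks/10 = microseconds, then shift epochs
    return datetime.datetime.utcfromtimestamp(unix_usecs / 1e6)


# 128790414900000000 ticks corresponds to Unix time 1234567890, i.e. 2009-02-13 23:31:30 UTC.
print(filetime_to_datetime(128790414900000000))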
KNMI/VERCE
verce-hpc-pe/src/networkx/generators/tests/test_degree_seq.py
61
5734
#!/usr/bin/env python from nose.tools import * import networkx from networkx import * from networkx.generators.degree_seq import * from networkx.utils import uniform_sequence,powerlaw_sequence def test_configuration_model_empty(): # empty graph has empty degree sequence deg_seq=[] G=configuration_model(deg_seq) assert_equal(G.degree(), {}) def test_configuration_model(): deg_seq=[5,3,3,3,3,2,2,2,1,1,1] G=configuration_model(deg_seq,seed=12345678) assert_equal(sorted(G.degree().values(),reverse=True), [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]) assert_equal(sorted(G.degree(range(len(deg_seq))).values(), reverse=True), [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]) # test that fixed seed delivers the same graph deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3] G1=configuration_model(deg_seq,seed=1000) G2=configuration_model(deg_seq,seed=1000) assert_true(is_isomorphic(G1,G2)) G1=configuration_model(deg_seq,seed=10) G2=configuration_model(deg_seq,seed=10) assert_true(is_isomorphic(G1,G2)) @raises(NetworkXError) def test_configuation_raise(): z=[5,3,3,3,3,2,2,2,1,1,1] G = configuration_model(z, create_using=DiGraph()) @raises(NetworkXError) def test_configuation_raise_odd(): z=[5,3,3,3,3,2,2,2,1,1] G = configuration_model(z, create_using=DiGraph()) @raises(NetworkXError) def test_directed_configuation_raise_unequal(): zin = [5,3,3,3,3,2,2,2,1,1] zout = [5,3,3,3,3,2,2,2,1,2] G = directed_configuration_model(zin, zout) def test_directed_configuation_mode(): G = directed_configuration_model([],[],seed=0) assert_equal(len(G),0) def test_expected_degree_graph_empty(): # empty graph has empty degree sequence deg_seq=[] G=expected_degree_graph(deg_seq) assert_equal(G.degree(), {}) def test_expected_degree_graph(): # test that fixed seed delivers the same graph deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3] G1=expected_degree_graph(deg_seq,seed=1000) G2=expected_degree_graph(deg_seq,seed=1000) assert_true(is_isomorphic(G1,G2)) G1=expected_degree_graph(deg_seq,seed=10) G2=expected_degree_graph(deg_seq,seed=10) assert_true(is_isomorphic(G1,G2)) def test_expected_degree_graph_selfloops(): deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3] G1=expected_degree_graph(deg_seq,seed=1000, selfloops=False) G2=expected_degree_graph(deg_seq,seed=1000, selfloops=False) assert_true(is_isomorphic(G1,G2)) def test_expected_degree_graph_skew(): deg_seq=[10,2,2,2,2] G1=expected_degree_graph(deg_seq,seed=1000) G2=expected_degree_graph(deg_seq,seed=1000) assert_true(is_isomorphic(G1,G2)) def test_havel_hakimi_construction(): G = havel_hakimi_graph([]) assert_equal(len(G),0) z=[1000,3,3,3,3,2,2,2,1,1,1] assert_raises(networkx.exception.NetworkXError, havel_hakimi_graph, z) z=["A",3,3,3,3,2,2,2,1,1,1] assert_raises(networkx.exception.NetworkXError, havel_hakimi_graph, z) z=[5,4,3,3,3,2,2,2] G=havel_hakimi_graph(z) G=configuration_model(z) z=[6,5,4,4,2,1,1,1] assert_raises(networkx.exception.NetworkXError, havel_hakimi_graph, z) z=[10,3,3,3,3,2,2,2,2,2,2] G=havel_hakimi_graph(z) assert_raises(networkx.exception.NetworkXError, havel_hakimi_graph, z, create_using=DiGraph()) def test_directed_havel_hakimi(): # Test range of valid directed degree sequences n, r = 100, 10 p = 1.0 / r for i in range(r): G1 = nx.erdos_renyi_graph(n,p*(i+1),None,True) din = list(G1.in_degree().values()) dout = list(G1.out_degree().values()) G2 = nx.directed_havel_hakimi_graph(din, dout) assert_true(din == list(G2.in_degree().values())) assert_true(dout == list(G2.out_degree().values())) # Test non-graphical sequence dout = [1000,3,3,3,3,2,2,2,1,1,1] din=[103,102,102,102,102,102,102,102,102,102] 
assert_raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout) # Test valid sequences dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4] din=[2, 2, 2, 2, 2, 2, 2, 2, 0, 2] G2 = nx.directed_havel_hakimi_graph(din, dout) assert_true(din == list(G2.in_degree().values())) assert_true(dout == list(G2.out_degree().values())) # Test unequal sums din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2] assert_raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout) # Test for negative values din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2] assert_raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout) def test_degree_sequence_tree(): z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4] G=degree_sequence_tree(z) assert_true(len(G.nodes())==len(z)) assert_true(len(G.edges())==sum(z)/2) assert_raises(networkx.exception.NetworkXError, degree_sequence_tree, z, create_using=DiGraph()) z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] assert_raises(networkx.exception.NetworkXError, degree_sequence_tree, z) def test_random_degree_sequence_graph(): d=[1,2,2,3] G = nx.random_degree_sequence_graph(d) assert_equal(d, list(G.degree().values())) def test_random_degree_sequence_graph_raise(): z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] assert_raises(networkx.exception.NetworkXUnfeasible, random_degree_sequence_graph, z) def test_random_degree_sequence_large(): G = nx.fast_gnp_random_graph(100,0.1) d = G.degree().values() G = nx.random_degree_sequence_graph(d, seed=0) assert_equal(sorted(d), sorted(list(G.degree().values())))
mit
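The tests above mostly check two properties of the degree-sequence generators: the realised degrees match the requested sequence, and a fixed seed reproduces the same graph. A minimal usage sketch of configuration_model along those lines:

import networkx as nx

# A graphical degree sequence: its sum is even, so some (multi)graph realises it.
deg_seq = [3, 3, 3, 3, 2, 2, 1, 1]
assert sum(deg_seq) % 2 == 0

# configuration_model pairs node "stubs" at random, so it may produce self-loops
# and parallel edges; a fixed seed makes the pairing reproducible.
G1 = nx.configuration_model(deg_seq, seed=42)
G2 = nx.configuration_model(deg_seq, seed=42)

print(sorted(dict(G1.degree()).values(), reverse=True))   # [3, 3, 3, 3, 2, 2, 1, 1]
print(nx.is_isomorphic(G1, G2))                           # True: same seed, same graph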
lewixliu/git-repo
subcmds/upload.py
4
21890
# -*- coding:utf-8 -*- # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import copy import re import sys from command import InteractiveCommand from editor import Editor from error import HookError, UploadError from git_command import GitCommand from git_refs import R_HEADS from hooks import RepoHook from pyversion import is_python3 if not is_python3(): input = raw_input # noqa: F821 else: unicode = str UNUSUAL_COMMIT_THRESHOLD = 5 def _ConfirmManyUploads(multiple_branches=False): if multiple_branches: print('ATTENTION: One or more branches has an unusually high number ' 'of commits.') else: print('ATTENTION: You are uploading an unusually high number of commits.') print('YOU PROBABLY DO NOT MEAN TO DO THIS. (Did you rebase across ' 'branches?)') answer = input("If you are sure you intend to do this, type 'yes': ").strip() return answer == "yes" def _die(fmt, *args): msg = fmt % args print('error: %s' % msg, file=sys.stderr) sys.exit(1) def _SplitEmails(values): result = [] for value in values: result.extend([s.strip() for s in value.split(',')]) return result class Upload(InteractiveCommand): common = True helpSummary = "Upload changes for code review" helpUsage = """ %prog [--re --cc] [<project>]... """ helpDescription = """ The '%prog' command is used to send changes to the Gerrit Code Review system. It searches for topic branches in local projects that have not yet been published for review. If multiple topic branches are found, '%prog' opens an editor to allow the user to select which branches to upload. '%prog' searches for uploadable changes in all projects listed at the command line. Projects can be specified either by name, or by a relative or absolute path to the project's local directory. If no projects are specified, '%prog' will search for uploadable changes in all projects listed in the manifest. If the --reviewers or --cc options are passed, those emails are added to the respective list of users, and emails are sent to any new users. Users passed as --reviewers must already be registered with the code review system, or the upload will fail. # Configuration review.URL.autoupload: To disable the "Upload ... (y/N)?" prompt, you can set a per-project or global Git configuration option. If review.URL.autoupload is set to "true" then repo will assume you always answer "y" at the prompt, and will not prompt you further. If it is set to "false" then repo will assume you always answer "n", and will abort. review.URL.autoreviewer: To automatically append a user or mailing list to reviews, you can set a per-project or global Git option to do so. review.URL.autocopy: To automatically copy a user or mailing list to all uploaded reviews, you can set a per-project or global Git option to do so. Specifically, review.URL.autocopy can be set to a comma separated list of reviewers who you always want copied on all uploads with a non-empty --re argument. 
review.URL.username: Override the username used to connect to Gerrit Code Review. By default the local part of the email address is used. The URL must match the review URL listed in the manifest XML file, or in the .git/config within the project. For example: [remote "origin"] url = git://git.example.com/project.git review = http://review.example.com/ [review "http://review.example.com/"] autoupload = true autocopy = [email protected],[email protected] review.URL.uploadtopic: To add a topic branch whenever uploading a commit, you can set a per-project or global Git option to do so. If review.URL.uploadtopic is set to "true" then repo will assume you always want the equivalent of the -t option to the repo command. If unset or set to "false" then repo will make use of only the command line option. review.URL.uploadhashtags: To add hashtags whenever uploading a commit, you can set a per-project or global Git option to do so. The value of review.URL.uploadhashtags will be used as comma delimited hashtags like the --hashtag option. review.URL.uploadlabels: To add labels whenever uploading a commit, you can set a per-project or global Git option to do so. The value of review.URL.uploadlabels will be used as comma delimited labels like the --label option. review.URL.uploadnotify: Control e-mail notifications when uploading. https://gerrit-review.googlesource.com/Documentation/user-upload.html#notify # References Gerrit Code Review: https://www.gerritcodereview.com/ """ def _Options(self, p): p.add_option('-t', dest='auto_topic', action='store_true', help='Send local branch name to Gerrit Code Review') p.add_option('--hashtag', '--ht', dest='hashtags', action='append', default=[], help='Add hashtags (comma delimited) to the review.') p.add_option('--hashtag-branch', '--htb', action='store_true', help='Add local branch name as a hashtag.') p.add_option('-l', '--label', dest='labels', action='append', default=[], help='Add a label when uploading.') p.add_option('--re', '--reviewers', type='string', action='append', dest='reviewers', help='Request reviews from these people.') p.add_option('--cc', type='string', action='append', dest='cc', help='Also send email to these email addresses.') p.add_option('--br', type='string', action='store', dest='branch', help='Branch to upload.') p.add_option('--cbr', '--current-branch', dest='current_branch', action='store_true', help='Upload current git branch.') p.add_option('--ne', '--no-emails', action='store_false', dest='notify', default=True, help='If specified, do not send emails on upload.') p.add_option('-p', '--private', action='store_true', dest='private', default=False, help='If specified, upload as a private change.') p.add_option('-w', '--wip', action='store_true', dest='wip', default=False, help='If specified, upload as a work-in-progress change.') p.add_option('-o', '--push-option', type='string', action='append', dest='push_options', default=[], help='Additional push options to transmit') p.add_option('-D', '--destination', '--dest', type='string', action='store', dest='dest_branch', metavar='BRANCH', help='Submit for review on this target branch.') p.add_option('-n', '--dry-run', dest='dryrun', default=False, action='store_true', help='Do everything except actually upload the CL.') p.add_option('-y', '--yes', default=False, action='store_true', help='Answer yes to all safe prompts.') p.add_option('--no-cert-checks', dest='validate_certs', action='store_false', default=True, help='Disable verifying ssl certs (unsafe).') # Options relating to upload 
hook. Note that verify and no-verify are NOT # opposites of each other, which is why they store to different locations. # We are using them to match 'git commit' syntax. # # Combinations: # - no-verify=False, verify=False (DEFAULT): # If stdout is a tty, can prompt about running upload hooks if needed. # If user denies running hooks, the upload is cancelled. If stdout is # not a tty and we would need to prompt about upload hooks, upload is # cancelled. # - no-verify=False, verify=True: # Always run upload hooks with no prompt. # - no-verify=True, verify=False: # Never run upload hooks, but upload anyway (AKA bypass hooks). # - no-verify=True, verify=True: # Invalid g = p.add_option_group('Upload hooks') g.add_option('--no-verify', dest='bypass_hooks', action='store_true', help='Do not run the upload hook.') g.add_option('--verify', dest='allow_all_hooks', action='store_true', help='Run the upload hook without prompting.') g.add_option('--ignore-hooks', dest='ignore_hooks', action='store_true', help='Do not abort uploading if upload hooks fail.') def _SingleBranch(self, opt, branch, people): project = branch.project name = branch.name remote = project.GetBranch(name).remote key = 'review.%s.autoupload' % remote.review answer = project.config.GetBoolean(key) if answer is False: _die("upload blocked by %s = false" % key) if answer is None: date = branch.date commit_list = branch.commits destination = opt.dest_branch or project.dest_branch or project.revisionExpr print('Upload project %s/ to remote branch %s%s:' % (project.relpath, destination, ' (private)' if opt.private else '')) print(' branch %s (%2d commit%s, %s):' % ( name, len(commit_list), len(commit_list) != 1 and 's' or '', date)) for commit in commit_list: print(' %s' % commit) print('to %s (y/N)? ' % remote.review, end='') # TODO: When we require Python 3, use flush=True w/print above. 
sys.stdout.flush() if opt.yes: print('<--yes>') answer = True else: answer = sys.stdin.readline().strip().lower() answer = answer in ('y', 'yes', '1', 'true', 't') if answer: if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD: answer = _ConfirmManyUploads() if answer: self._UploadAndReport(opt, [branch], people) else: _die("upload aborted by user") def _MultipleBranches(self, opt, pending, people): projects = {} branches = {} script = [] script.append('# Uncomment the branches to upload:') for project, avail in pending: script.append('#') script.append('# project %s/:' % project.relpath) b = {} for branch in avail: if branch is None: continue name = branch.name date = branch.date commit_list = branch.commits if b: script.append('#') destination = opt.dest_branch or project.dest_branch or project.revisionExpr script.append('# branch %s (%2d commit%s, %s) to remote branch %s:' % ( name, len(commit_list), len(commit_list) != 1 and 's' or '', date, destination)) for commit in commit_list: script.append('# %s' % commit) b[name] = branch projects[project.relpath] = project branches[project.name] = b script.append('') script = Editor.EditString("\n".join(script)).split("\n") project_re = re.compile(r'^#?\s*project\s*([^\s]+)/:$') branch_re = re.compile(r'^\s*branch\s*([^\s(]+)\s*\(.*') project = None todo = [] for line in script: m = project_re.match(line) if m: name = m.group(1) project = projects.get(name) if not project: _die('project %s not available for upload', name) continue m = branch_re.match(line) if m: name = m.group(1) if not project: _die('project for branch %s not in script', name) branch = branches[project.name].get(name) if not branch: _die('branch %s not in %s', name, project.relpath) todo.append(branch) if not todo: _die("nothing uncommented for upload") many_commits = False for branch in todo: if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD: many_commits = True break if many_commits: if not _ConfirmManyUploads(multiple_branches=True): _die("upload aborted by user") self._UploadAndReport(opt, todo, people) def _AppendAutoList(self, branch, people): """ Appends the list of reviewers in the git project's config. Appends the list of users in the CC list in the git project's config if a non-empty reviewer list was found. 
""" name = branch.name project = branch.project key = 'review.%s.autoreviewer' % project.GetBranch(name).remote.review raw_list = project.config.GetString(key) if raw_list is not None: people[0].extend([entry.strip() for entry in raw_list.split(',')]) key = 'review.%s.autocopy' % project.GetBranch(name).remote.review raw_list = project.config.GetString(key) if raw_list is not None and len(people[0]) > 0: people[1].extend([entry.strip() for entry in raw_list.split(',')]) def _FindGerritChange(self, branch): last_pub = branch.project.WasPublished(branch.name) if last_pub is None: return "" refs = branch.GetPublishedRefs() try: # refs/changes/XYZ/N --> XYZ return refs.get(last_pub).split('/')[-2] except (AttributeError, IndexError): return "" def _UploadAndReport(self, opt, todo, original_people): have_errors = False for branch in todo: try: people = copy.deepcopy(original_people) self._AppendAutoList(branch, people) # Check if there are local changes that may have been forgotten changes = branch.project.UncommitedFiles() if changes: key = 'review.%s.autoupload' % branch.project.remote.review answer = branch.project.config.GetBoolean(key) # if they want to auto upload, let's not ask because it could be automated if answer is None: print() print('Uncommitted changes in %s (did you forget to amend?):' % branch.project.name) print('\n'.join(changes)) print('Continue uploading? (y/N) ', end='') # TODO: When we require Python 3, use flush=True w/print above. sys.stdout.flush() if opt.yes: print('<--yes>') a = 'yes' else: a = sys.stdin.readline().strip().lower() if a not in ('y', 'yes', 't', 'true', 'on'): print("skipping upload", file=sys.stderr) branch.uploaded = False branch.error = 'User aborted' continue # Check if topic branches should be sent to the server during upload if opt.auto_topic is not True: key = 'review.%s.uploadtopic' % branch.project.remote.review opt.auto_topic = branch.project.config.GetBoolean(key) def _ExpandCommaList(value): """Split |value| up into comma delimited entries.""" if not value: return for ret in value.split(','): ret = ret.strip() if ret: yield ret # Check if hashtags should be included. key = 'review.%s.uploadhashtags' % branch.project.remote.review hashtags = set(_ExpandCommaList(branch.project.config.GetString(key))) for tag in opt.hashtags: hashtags.update(_ExpandCommaList(tag)) if opt.hashtag_branch: hashtags.add(branch.name) # Check if labels should be included. key = 'review.%s.uploadlabels' % branch.project.remote.review labels = set(_ExpandCommaList(branch.project.config.GetString(key))) for label in opt.labels: labels.update(_ExpandCommaList(label)) # Basic sanity check on label syntax. for label in labels: if not re.match(r'^.+[+-][0-9]+$', label): print('repo: error: invalid label syntax "%s": labels use forms ' 'like CodeReview+1 or Verified-1' % (label,), file=sys.stderr) sys.exit(1) # Handle e-mail notifications. 
if opt.notify is False: notify = 'NONE' else: key = 'review.%s.uploadnotify' % branch.project.remote.review notify = branch.project.config.GetString(key) destination = opt.dest_branch or branch.project.dest_branch # Make sure our local branch is not setup to track a different remote branch merge_branch = self._GetMergeBranch(branch.project) if destination: full_dest = destination if not full_dest.startswith(R_HEADS): full_dest = R_HEADS + full_dest if not opt.dest_branch and merge_branch and merge_branch != full_dest: print('merge branch %s does not match destination branch %s' % (merge_branch, full_dest)) print('skipping upload.') print('Please use `--destination %s` if this is intentional' % destination) branch.uploaded = False continue branch.UploadForReview(people, dryrun=opt.dryrun, auto_topic=opt.auto_topic, hashtags=hashtags, labels=labels, private=opt.private, notify=notify, wip=opt.wip, dest_branch=destination, validate_certs=opt.validate_certs, push_options=opt.push_options) branch.uploaded = True except UploadError as e: branch.error = e branch.uploaded = False have_errors = True print(file=sys.stderr) print('----------------------------------------------------------------------', file=sys.stderr) if have_errors: for branch in todo: if not branch.uploaded: if len(str(branch.error)) <= 30: fmt = ' (%s)' else: fmt = '\n (%s)' print(('[FAILED] %-15s %-15s' + fmt) % ( branch.project.relpath + '/', branch.name, str(branch.error)), file=sys.stderr) print() for branch in todo: if branch.uploaded: print('[OK ] %-15s %s' % ( branch.project.relpath + '/', branch.name), file=sys.stderr) if have_errors: sys.exit(1) def _GetMergeBranch(self, project): p = GitCommand(project, ['rev-parse', '--abbrev-ref', 'HEAD'], capture_stdout=True, capture_stderr=True) p.Wait() local_branch = p.stdout.strip() p = GitCommand(project, ['config', '--get', 'branch.%s.merge' % local_branch], capture_stdout=True, capture_stderr=True) p.Wait() merge_branch = p.stdout.strip() return merge_branch def Execute(self, opt, args): project_list = self.GetProjects(args) pending = [] reviewers = [] cc = [] branch = None if opt.branch: branch = opt.branch for project in project_list: if opt.current_branch: cbr = project.CurrentBranch up_branch = project.GetUploadableBranch(cbr) if up_branch: avail = [up_branch] else: avail = None print('ERROR: Current branch (%s) not uploadable. ' 'You may be able to type ' '"git branch --set-upstream-to m/master" to fix ' 'your branch.' 
% str(cbr), file=sys.stderr) else: avail = project.GetUploadableBranches(branch) if avail: pending.append((project, avail)) if not pending: if branch is None: print('repo: error: no branches ready for upload', file=sys.stderr) else: print('repo: error: no branches named "%s" ready for upload' % (branch,), file=sys.stderr) return 1 if not opt.bypass_hooks: hook = RepoHook('pre-upload', self.manifest.repo_hooks_project, self.manifest.topdir, self.manifest.manifestProject.GetRemote('origin').url, abort_if_user_denies=True) pending_proj_names = [project.name for (project, available) in pending] pending_worktrees = [project.worktree for (project, available) in pending] passed = True try: hook.Run(opt.allow_all_hooks, project_list=pending_proj_names, worktree_list=pending_worktrees) except SystemExit: passed = False if not opt.ignore_hooks: raise except HookError as e: passed = False print("ERROR: %s" % str(e), file=sys.stderr) if not passed: if opt.ignore_hooks: print('\nWARNING: pre-upload hooks failed, but uploading anyways.', file=sys.stderr) else: return 1 if opt.reviewers: reviewers = _SplitEmails(opt.reviewers) if opt.cc: cc = _SplitEmails(opt.cc) people = (reviewers, cc) if len(pending) == 1 and len(pending[0][1]) == 1: self._SingleBranch(opt, pending[0][1][0], people) else: self._MultipleBranches(opt, pending, people)
apache-2.0
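Several of the review.URL.* settings described above (uploadhashtags, uploadlabels, autocopy) are comma delimited strings that get merged with the corresponding command line options. A standalone sketch of that merge, mirroring the _ExpandCommaList helper inside the upload command (the sample values are made up):

def expand_comma_list(value):
    """Split a comma delimited config value into stripped, non-empty entries."""
    if not value:
        return
    for entry in value.split(','):
        entry = entry.strip()
        if entry:
            yield entry


# Hashtags configured via review.URL.uploadhashtags ...
config_value = 'perf, refactor,, '
# ... are merged with any --hashtag values given on the command line.
cli_values = ['ci', 'perf']

hashtags = set(expand_comma_list(config_value))
for value in cli_values:
    hashtags.update(expand_comma_list(value))

print(sorted(hashtags))   # ['ci', 'perf', 'refactor']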
TeradataCenterForHadoop/ambari-presto-service
tests/test_worker.py
2
5422
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mock import MagicMock, patch, mock_open, call import unittest import os import sys sys.path.append(os.path.dirname(os.path.realpath(__file__))) from package.scripts.presto_worker import Worker from package.scripts.params import memory_configs class TestWorker(unittest.TestCase): dummy_config_properties = {'pseudo.distributed.enabled': False, 'query.queue-config-file': '', 'http-server.http.port': '8285', 'node-scheduler.include-coordinator': False} minimal_config_properties = {'pseudo.distributed.enabled': False, 'node-scheduler.include-coordinator': False} for memory_config in memory_configs: dummy_config_properties[memory_config] = '123' def setUp(self): self.mock_env = MagicMock() @patch('package.scripts.presto_worker.Worker.configure') @patch('package.scripts.presto_worker.Execute') def test_lifecycle_methods_shell_out_to_execute( self, execute_mock, unused_configure_mock): presto_worker = Worker() presto_worker.install(self.mock_env) assert execute_mock.call_count is 2 assert 'wget' in execute_mock.call_args_list[0][0][0] assert 'rpm -i' in execute_mock.call_args_list[1][0][0] assert 'export JAVA8_HOME=' in execute_mock.call_args_list[1][0][0] execute_mock.reset_mock() presto_worker.stop(self.mock_env) assert execute_mock.call_count is 1 assert 'stop' in execute_mock.call_args_list[0][0][0] execute_mock.reset_mock() presto_worker.start(self.mock_env) assert execute_mock.call_count is 1 assert 'start' in execute_mock.call_args_list[0][0][0] execute_mock.reset_mock() presto_worker.status(self.mock_env) assert execute_mock.call_count is 1 assert 'status' in execute_mock.call_args_list[0][0][0] @patch('package.scripts.presto_worker.Worker.configure') @patch('package.scripts.presto_worker.Execute') def test_install_start_configure_presto( self, unused_execute_mock, configure_mock): presto_worker = Worker() presto_worker.install(self.mock_env) assert configure_mock.called configure_mock.reset_mock() presto_worker.start(self.mock_env) assert configure_mock.called @patch('package.scripts.presto_worker.create_connectors') def test_configure_adds_tpch_connector(self, create_connectors_mock): presto_worker = Worker() with patch('__builtin__.open'): presto_worker.configure(self.mock_env) assert call({}, "{'tpch': ['connector.name=tpch']}") in create_connectors_mock.call_args_list @patch('package.scripts.presto_worker.create_connectors') @patch('package.scripts.params.config_properties', new=dummy_config_properties) def test_configure_ignore_pseudo_distribute_enabled_property(self, create_connectors_mock ): config = collect_config_vars_written_out(self.mock_env, Worker()) assert 'pseudo.distributed.enabled=true\n' not in config @patch('package.scripts.presto_worker.create_connectors') @patch('package.scripts.params.config_properties', new=dummy_config_properties) def test_configure_ignore_empty_queue_config_file(self, create_connectors_mock): config = collect_config_vars_written_out(self.mock_env, Worker()) for item in config: assert not 
item.startswith('query.queue-config-file') @patch('package.scripts.presto_worker.create_connectors') @patch('package.scripts.params.config_properties', new=minimal_config_properties) def test_constant_properties(self, create_connectors_mock): config = collect_config_vars_written_out(self.mock_env, Worker()) assert 'coordinator=false\n' in config assert 'node.data-dir=/var/lib/presto\n' in config @patch('package.scripts.presto_worker.create_connectors') @patch('package.scripts.params.config_properties', new=dummy_config_properties) def test_memory_settings_have_units(self, create_connectors_mock): from test_coordinator import assert_memory_configs_properly_formatted config = collect_config_vars_written_out(self.mock_env, Worker()) assert_memory_configs_properly_formatted(config) def collect_config_vars_written_out(mock_env, obj_under_test): config = [] open_mock = mock_file_descriptor_write_method(config) with patch('__builtin__.open', open_mock): getattr(obj_under_test, 'configure')(mock_env) return config def mock_file_descriptor_write_method(list): def append(item_to_append): list.append(item_to_append) open_mock = mock_open() fd = open_mock() fd.write = append return open_mock
apache-2.0
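The tests above never touch the filesystem: they replace the built-in open with mock_open() and substitute the file handle's write method so every line the configure step would write is captured in a plain list. A self-contained sketch of that capture pattern (write_config and the path are illustrative):

import sys

try:
    from unittest.mock import mock_open, patch   # Python 3
except ImportError:
    from mock import mock_open, patch            # Python 2, as in the tests above


def write_config(path, properties):
    # Illustrative function under test: writes key=value lines to a file.
    with open(path, 'w') as fd:
        for key, value in sorted(properties.items()):
            fd.write('%s=%s\n' % (key, value))


captured = []
open_mock = mock_open()
open_mock.return_value.write = captured.append   # record every write() call

# The tests above patch '__builtin__.open' because they run under Python 2;
# on Python 3 the equivalent target is 'builtins.open'.
target = 'builtins.open' if sys.version_info[0] >= 3 else '__builtin__.open'
with patch(target, open_mock):
    write_config('/etc/presto/config.properties', {'coordinator': 'false'})

print(captured)   # ['coordinator=false\n']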
adsabs/ADSPipelineMsg
adsmsg/protobuf/status_pb2.py
1
2108
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: status.proto from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='status.proto', package='adsmsg', syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x0cstatus.proto\x12\x06\x61\x64smsg*7\n\x06Status\x12\n\n\x06\x61\x63tive\x10\x00\x12\x0b\n\x07\x64\x65leted\x10\x01\x12\x07\n\x03new\x10\x02\x12\x0b\n\x07updated\x10\x03\x62\x06proto3' ) _STATUS = _descriptor.EnumDescriptor( name='Status', full_name='adsmsg.Status', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='active', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='deleted', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='new', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='updated', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=24, serialized_end=79, ) _sym_db.RegisterEnumDescriptor(_STATUS) Status = enum_type_wrapper.EnumTypeWrapper(_STATUS) active = 0 deleted = 1 new = 2 updated = 3 DESCRIPTOR.enum_types_by_name['Status'] = _STATUS _sym_db.RegisterFileDescriptor(DESCRIPTOR) # @@protoc_insertion_point(module_scope)
agpl-3.0
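Generated _pb2 modules like this one expose the enum both as module-level constants and through an EnumTypeWrapper that maps between names and numbers. A short usage sketch, assuming the package layout shown above makes the module importable as adsmsg.protobuf.status_pb2:

from adsmsg.protobuf.status_pb2 import Status, active, deleted

print(active, deleted)            # 0 1, plain module-level integer constants
print(Status.Name(0))             # 'active'
print(Status.Value('updated'))    # 3
print(Status.keys())              # ['active', 'deleted', 'new', 'updated']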
edx/edx-ora
peer_grading/migrations/0007_auto__add_unique_calibrationhistory_student_id_location.py
1
5423
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding unique constraint on 'CalibrationHistory', fields ['student_id', 'location'] db.create_unique('peer_grading_calibrationhistory', ['student_id', 'location']) def backwards(self, orm): # Removing unique constraint on 'CalibrationHistory', fields ['student_id', 'location'] db.delete_unique('peer_grading_calibrationhistory', ['student_id', 'location']) models = { 'controller.submission': { 'Meta': {'unique_together': "(('student_response', 'student_id', 'location'),)", 'object_name': 'Submission'}, 'answer': ('django.db.models.fields.TextField', [], {'default': "''"}), 'control_fields': ('django.db.models.fields.TextField', [], {'default': "''"}), 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'duplicate_submission_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'grader_settings': ('django.db.models.fields.TextField', [], {'default': "''"}), 'has_been_duplicate_checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_display': ('django.db.models.fields.TextField', [], {'default': "''"}), 'is_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_plagiarized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'db_index': 'True'}), 'max_score': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'next_grader_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}), 'posted_results_back_to_queue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'preferred_grader_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}), 'previous_grader_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}), 'problem_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'prompt': ('django.db.models.fields.TextField', [], {'default': "''"}), 'rubric': ('django.db.models.fields.TextField', [], {'default': "''"}), 'skip_basic_checks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'student_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'student_response': ('django.db.models.fields.TextField', [], {'default': "''"}), 'student_submission_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'xqueue_queue_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'xqueue_submission_id': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '1024'}), 'xqueue_submission_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}) }, 'peer_grading.calibrationhistory': { 'Meta': {'unique_together': "(('student_id', 'location'),)", 'object_name': 'CalibrationHistory'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'db_index': 'True'}), 'problem_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'student_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'peer_grading.calibrationrecord': { 'Meta': {'object_name': 'CalibrationRecord'}, 'actual_score': ('django.db.models.fields.IntegerField', [], {}), 'calibration_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['peer_grading.CalibrationHistory']"}), 'feedback': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_pre_calibration': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rubric_scores': ('django.db.models.fields.TextField', [], {'default': "''"}), 'rubric_scores_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'score': ('django.db.models.fields.IntegerField', [], {}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.Submission']"}) } } complete_apps = ['peer_grading']
agpl-3.0
rohitwaghchaure/erpnext_develop
erpnext/healthcare/doctype/physician/test_physician.py
5
1077
# -*- coding: utf-8 -*- # Copyright (c) 2015, ESS LLP and Contributors # See license.txt from __future__ import unicode_literals import unittest import frappe test_dependencies = ['Physician Schedule'] class TestPhysician(unittest.TestCase): def tearDown(self): frappe.delete_doc_if_exists('Physician', '_Testdoctor2', force=1) def test_schedule_and_time(self): physician = frappe.new_doc('Physician') physician.first_name = '_Testdoctor2' physician.physician_schedule = '_Testdoctor2 Schedule' self.assertRaises(frappe.ValidationError, physician.insert) physician.physician_schedule = '' physician.time_per_appointment = 15 self.assertRaises(frappe.ValidationError, physician.insert) physician.physician_schedule = '_Testdoctor2 Schedule' physician.time_per_appointment = 15 physician.insert() def test_new_physician_without_schedule(self): physician = frappe.new_doc('Physician') physician.first_name = '_Testdoctor2' physician.insert() self.assertEqual(frappe.get_value('Physician', '_Testdoctor2', 'first_name'), '_Testdoctor2')
gpl-3.0
mads-bertelsen/McCode
meta-pkgs/windows/Support/gnuplot-py-1.8/__init__.py
4
6268
#! /usr/bin/env python # $Id: __init__.py 306 2008-05-02 01:09:02Z alford $ # Copyright (C) 1998-2003 Michael Haggerty <[email protected]> # # This file is licensed under the GNU Lesser General Public License # (LGPL). See LICENSE.txt for details. """Gnuplot -- A pipe-based interface to the gnuplot plotting program. This is the main module of the Gnuplot package. Written by "Michael Haggerty", mailto:[email protected]. Inspired by and partly derived from an earlier version by "Konrad Hinsen", mailto:[email protected]. If you find a problem or have a suggestion, please "let me know", mailto:[email protected]. Other feedback would also be appreciated. The Gnuplot.py home page is at "Gnuplot.py", http://gnuplot-py.sourceforge.net For information about how to use this module: 1. Check the README file. 2. Look at the example code in demo.py and try running it by typing 'python demo.py' or 'python __init__.py'. 3. For more details see the extensive documentation strings throughout the python source files, especially this file, _Gnuplot.py, PlotItems.py, and gp_unix.py. 4. The docstrings have also been turned into html which can be read "here", http://gnuplot-py.sourceforge.net/doc. However, the formatting is not perfect; when in doubt, double-check the docstrings. You should import this file with 'import Gnuplot', not with 'from Gnuplot import *', because the module and the main class have the same name, `Gnuplot'. To obtain the gnuplot plotting program itself, see "the gnuplot FAQ", ftp://ftp.gnuplot.vt.edu/pub/gnuplot/faq/index.html. Obviously you need to have gnuplot installed if you want to use Gnuplot.py. The old command-based interface to gnuplot (previously supported as 'oldplot.py') has been removed from the package. Features: o Allows the creation of two or three dimensional plots from python. o A gnuplot session is an instance of class 'Gnuplot'. Multiple sessions can be open at once. For example:: g1 = Gnuplot.Gnuplot() g2 = Gnuplot.Gnuplot() Note that due to limitations on those platforms, opening multiple simultaneous sessions on Windows or Macintosh may not work correctly. (Feedback?) o The implicitly-generated gnuplot commands can be stored to a file instead of executed immediately:: g = Gnuplot.Gnuplot('commands.txt') The 'commands.txt' file can then be run later with gnuplot's 'load' command. Beware, however: the plot commands may depend on the existence of temporary files, which will probably be deleted before you use the command file. o Can pass arbitrary commands to the gnuplot command interpreter:: g('set pointsize 2') (If this is all you want to do, you might consider using the lightweight GnuplotProcess class defined in gp.py.) o A Gnuplot object knows how to plot objects of type 'PlotItem'. Any PlotItem can have optional 'title' and/or 'with' suboptions. Builtin PlotItem types: * 'Data(array1)' -- data from a Python list or NumPy array (permits additional option 'cols' ) * 'File('filename')' -- data from an existing data file (permits additional option 'using' ) * 'Func('exp(4.0 * sin(x))')' -- functions (passed as a string, evaluated by gnuplot) * 'GridData(m, x, y)' -- data tabulated on a grid of (x,y) values (usually to be plotted in 3-D) See the documentation strings for those classes for more details. o PlotItems are implemented as objects that can be assigned to variables and plotted repeatedly. Most of their plot options can also be changed with the new 'set_option()' member functions then they can be replotted with their new options. 
o Communication of commands to gnuplot is via a one-way pipe. Communication of data from python to gnuplot is via inline data (through the command pipe) or via temporary files. Temp files are deleted automatically when their associated 'PlotItem' is deleted. The PlotItems in use by a Gnuplot object at any given time are stored in an internal list so that they won't be deleted prematurely. o Can use 'replot' method to add datasets to an existing plot. o Can make persistent gnuplot windows by using the constructor option 'persist=1'. Such windows stay around even after the gnuplot program is exited. Note that only newer version of gnuplot support this option. o Can plot either directly to a postscript printer or to a postscript file via the 'hardcopy' method. o Grid data for the splot command can be sent to gnuplot in binary format, saving time and disk space. o Should work under Unix, Macintosh, and Windows. Restrictions: - Relies on the numpy Python extension. This can be obtained from the Scipy group at <http://www.scipy.org/Download>. If you're interested in gnuplot, you would probably also want numpy anyway. - Only a small fraction of gnuplot functionality is implemented as explicit method functions. However, you can give arbitrary commands to gnuplot manually:: g = Gnuplot.Gnuplot() g('set data style linespoints') g('set pointsize 5') - There is no provision for missing data points in array data (which gnuplot allows via the 'set missing' command). Bugs: - No attempt is made to check for errors reported by gnuplot. On unix any gnuplot error messages simply appear on stderr. (I don't know what happens under Windows.) - All of these classes perform their resource deallocation when '__del__' is called. Normally this works fine, but there are well-known cases when Python's automatic resource deallocation fails, which can leave temporary files around. """ __version__ = '1.8' # Other modules that should be loaded for 'from Gnuplot import *': __all__ = ['utils', 'funcutils', ] from gp import GnuplotOpts, GnuplotProcess, test_persist from Errors import Error, OptionError, DataError from PlotItems import PlotItem, Func, File, Data, GridData from _Gnuplot import Gnuplot if __name__ == '__main__': import demo demo.demo()
gpl-2.0
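The docstring above describes the whole surface of the package; a compact session tying the pieces together might look like the following. This is a sketch only: it assumes Python 2 (Gnuplot.py 1.8 predates Python 3 support) and a gnuplot binary on the PATH.

import Gnuplot

g = Gnuplot.Gnuplot(persist=1)     # keep the plot window open after the script exits
g('set grid')                      # arbitrary commands go straight to gnuplot

# PlotItems can be stored in variables and reused in later plot/replot calls.
curve = Gnuplot.Func('exp(-x*x)', title='gaussian')
points = Gnuplot.Data([[0, 0.2], [1, 0.5], [2, 0.1]], title='samples')

g.plot(curve, points)              # draw both items in one plot
g.hardcopy('plot.ps')              # write the same plot to a PostScript file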
joshuahoman/vivisect
vivisect/parsers/blob.py
4
1764
import envi import vivisect import vivisect.parsers as v_parsers from vivisect.const import * def parseFd(vw, fd, filename=None): fd.seek(0) arch = vw.config.viv.parsers.blob.arch bigend = vw.config.viv.parsers.blob.bigend baseaddr = vw.config.viv.parsers.blob.baseaddr try: envi.getArchModule(arch) except Exception, e: raise Exception('Blob loader *requires* arch option (-O viv.parsers.blob.arch="<archname>")') vw.setMeta('Architecture', arch) vw.setMeta('Platform','unknown') vw.setMeta('Format','blob') vw.setMeta('bigend', bigend) bytez = fd.read() vw.addMemoryMap(baseaddr, 7, filename, bytez) vw.addSegment( baseaddr, len(bytez), '%.8x' % baseaddr, 'blob' ) def parseFile(vw, filename): arch = vw.config.viv.parsers.blob.arch bigend = vw.config.viv.parsers.blob.bigend baseaddr = vw.config.viv.parsers.blob.baseaddr try: envi.getArchModule(arch) except Exception, e: raise Exception('Blob loader *requires* arch option (-O viv.parsers.blob.arch="<archname>")') vw.setMeta('Architecture', arch) vw.setMeta('Platform','unknown') vw.setMeta('Format','blob') vw.setMeta('bigend', bigend) fname = vw.addFile(filename, baseaddr, v_parsers.md5File(filename)) bytez = file(filename, "rb").read() vw.addMemoryMap(baseaddr, 7, filename, bytez) vw.addSegment( baseaddr, len(bytez), '%.8x' % baseaddr, 'blob' ) def parseMemory(vw, memobj, baseaddr): va,size,perms,fname = memobj.getMemoryMap(baseaddr) if not fname: fname = 'map_%.8x' % baseaddr bytes = memobj.readMemory(va, size) fname = vw.addFile(fname, baseaddr, v_parsers.md5Bytes(bytes)) vw.addMemoryMap(va, perms, fname, bytes)
apache-2.0
tjma12/pycbc
pycbc/fft/backend_support.py
1
3120
# Copyright (C) 2012 Josh Willis, Andrew Miller # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ This package provides a front-end to various fast Fourier transform implementations within PyCBC. """ import pycbc import pycbc.scheme # These are global variables, that are modified by the various scheme- # dependent submodules, to maintain a list of all possible backends # for all possible schemes that are available at runtime. This list # and dict are then used when parsing command-line options. _all_backends_list = [] _all_backends_dict = {} # The following is the function called by each scheme's setup to add whatever new # backends may have been found to the global list. Since some backends may be # shared, we must first check to make sure that the item in the list is not already # in the global list, and we assume that the keys to the dict are in one-to-one # correspondence with the items in the list. def _update_global_available(new_list, new_dict, global_list, global_dict): for item in new_list: if item not in global_list: global_list.append(item) global_dict.update({item:new_dict[item]}) def get_backend_modules(): return _all_backends_dict.values() def get_backend_names(): return _all_backends_dict.keys() BACKEND_PREFIX="pycbc.fft.backend_" @pycbc.scheme.schemed(BACKEND_PREFIX) def set_backend(backend_list): err_msg = "This function is a stub that should be overridden using " err_msg += "the scheme. You shouldn't be seeing this error!" raise ValueError(err_msg) @pycbc.scheme.schemed(BACKEND_PREFIX) def get_backend(): err_msg = "This function is a stub that should be overridden using " err_msg += "the scheme. You shouldn't be seeing this error!" raise ValueError(err_msg) # Import all scheme-dependent backends, to get _all_backends accurate: for scheme_name in ["cpu", "mkl", "cuda"]: try: mod = __import__('pycbc.fft.backend_' + scheme_name, fromlist = ['_alist', '_adict']) _alist = getattr(mod, "_alist") _adict = getattr(mod, "_adict") _update_global_available(_alist, _adict, _all_backends_list, _all_backends_dict) except ImportError: pass
gpl-3.0
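The module above keeps one global list/dict pair of FFT backends and lets each scheme module merge in whatever it discovered at import time, skipping anything already registered. A standalone sketch of that merge step, with string placeholders standing in for the real backend modules:

def update_global_available(new_list, new_dict, global_list, global_dict):
    """Same merge as _update_global_available above: add only unseen backends."""
    for item in new_list:
        if item not in global_list:
            global_list.append(item)
            global_dict.update({item: new_dict[item]})


all_backends_list, all_backends_dict = [], {}

# Each scheme contributes the backends it found; 'fftw' is shared between the
# two calls and must only be registered once.
update_global_available(['fftw', 'numpy'],
                        {'fftw': '<cpu fftw module>', 'numpy': '<cpu numpy module>'},
                        all_backends_list, all_backends_dict)
update_global_available(['fftw', 'mkl'],
                        {'fftw': '<cpu fftw module>', 'mkl': '<mkl module>'},
                        all_backends_list, all_backends_dict)

print(all_backends_list)            # ['fftw', 'numpy', 'mkl']
print(sorted(all_backends_dict))    # ['fftw', 'mkl', 'numpy']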
cernops/neutron
neutron/api/api_common.py
17
10729
# Copyright 2011 Citrix System. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib from oslo.config import cfg from webob import exc from neutron.common import constants from neutron.common import exceptions from neutron.openstack.common import log as logging LOG = logging.getLogger(__name__) def get_filters(request, attr_info, skips=[]): """Extracts the filters from the request string. Returns a dict of lists for the filters: check=a&check=b&name=Bob& becomes: {'check': [u'a', u'b'], 'name': [u'Bob']} """ res = {} for key, values in request.GET.dict_of_lists().iteritems(): if key in skips: continue values = [v for v in values if v] key_attr_info = attr_info.get(key, {}) if 'convert_list_to' in key_attr_info: values = key_attr_info['convert_list_to'](values) elif 'convert_to' in key_attr_info: convert_to = key_attr_info['convert_to'] values = [convert_to(v) for v in values] if values: res[key] = values return res def get_previous_link(request, items, id_key): params = request.GET.copy() params.pop('marker', None) if items: marker = items[0][id_key] params['marker'] = marker params['page_reverse'] = True return "%s?%s" % (request.path_url, urllib.urlencode(params)) def get_next_link(request, items, id_key): params = request.GET.copy() params.pop('marker', None) if items: marker = items[-1][id_key] params['marker'] = marker params.pop('page_reverse', None) return "%s?%s" % (request.path_url, urllib.urlencode(params)) def get_limit_and_marker(request): """Return marker, limit tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If limit == 0, it means we needn't pagination, then return None. """ max_limit = _get_pagination_max_limit() limit = _get_limit_param(request, max_limit) if max_limit > 0: limit = min(max_limit, limit) or max_limit if not limit: return None, None marker = request.GET.get('marker', None) return limit, marker def _get_pagination_max_limit(): max_limit = -1 if (cfg.CONF.pagination_max_limit.lower() != constants.PAGINATION_INFINITE): try: max_limit = int(cfg.CONF.pagination_max_limit) if max_limit == 0: raise ValueError() except ValueError: LOG.warn(_("Invalid value for pagination_max_limit: %s. It " "should be an integer greater to 0"), cfg.CONF.pagination_max_limit) return max_limit def _get_limit_param(request, max_limit): """Extract integer limit from request or fail.""" try: limit = int(request.GET.get('limit', 0)) if limit >= 0: return limit except ValueError: pass msg = _("Limit must be an integer 0 or greater and not '%d'") raise exceptions.BadRequest(resource='limit', msg=msg) def list_args(request, arg): """Extracts the list of arg from request.""" return [v for v in request.GET.getall(arg) if v] def get_sorts(request, attr_info): """Extract sort_key and sort_dir from request. 
Return as: [(key1, value1), (key2, value2)] """ sort_keys = list_args(request, "sort_key") sort_dirs = list_args(request, "sort_dir") if len(sort_keys) != len(sort_dirs): msg = _("The number of sort_keys and sort_dirs must be same") raise exc.HTTPBadRequest(explanation=msg) valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC] absent_keys = [x for x in sort_keys if x not in attr_info] if absent_keys: msg = _("%s is invalid attribute for sort_keys") % absent_keys raise exc.HTTPBadRequest(explanation=msg) invalid_dirs = [x for x in sort_dirs if x not in valid_dirs] if invalid_dirs: msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, " "valid value is '%(asc)s' and '%(desc)s'") % {'invalid_dirs': invalid_dirs, 'asc': constants.SORT_DIRECTION_ASC, 'desc': constants.SORT_DIRECTION_DESC}) raise exc.HTTPBadRequest(explanation=msg) return zip(sort_keys, [x == constants.SORT_DIRECTION_ASC for x in sort_dirs]) def get_page_reverse(request): data = request.GET.get('page_reverse', 'False') return data.lower() == "true" def get_pagination_links(request, items, limit, marker, page_reverse, key="id"): key = key if key else 'id' links = [] if not limit: return links if not (len(items) < limit and not page_reverse): links.append({"rel": "next", "href": get_next_link(request, items, key)}) if not (len(items) < limit and page_reverse): links.append({"rel": "previous", "href": get_previous_link(request, items, key)}) return links class PaginationHelper(object): def __init__(self, request, primary_key='id'): self.request = request self.primary_key = primary_key def update_fields(self, original_fields, fields_to_add): pass def update_args(self, args): pass def paginate(self, items): return items def get_links(self, items): return {} class PaginationEmulatedHelper(PaginationHelper): def __init__(self, request, primary_key='id'): super(PaginationEmulatedHelper, self).__init__(request, primary_key) self.limit, self.marker = get_limit_and_marker(request) self.page_reverse = get_page_reverse(request) def update_fields(self, original_fields, fields_to_add): if not original_fields: return if self.primary_key not in original_fields: original_fields.append(self.primary_key) fields_to_add.append(self.primary_key) def paginate(self, items): if not self.limit: return items i = -1 if self.marker: for item in items: i = i + 1 if item[self.primary_key] == self.marker: break if self.page_reverse: return items[i - self.limit:i] return items[i + 1:i + self.limit + 1] def get_links(self, items): return get_pagination_links( self.request, items, self.limit, self.marker, self.page_reverse, self.primary_key) class PaginationNativeHelper(PaginationEmulatedHelper): def update_args(self, args): if self.primary_key not in dict(args.get('sorts', [])).keys(): args.setdefault('sorts', []).append((self.primary_key, True)) args.update({'limit': self.limit, 'marker': self.marker, 'page_reverse': self.page_reverse}) def paginate(self, items): return items class NoPaginationHelper(PaginationHelper): pass class SortingHelper(object): def __init__(self, request, attr_info): pass def update_args(self, args): pass def update_fields(self, original_fields, fields_to_add): pass def sort(self, items): return items class SortingEmulatedHelper(SortingHelper): def __init__(self, request, attr_info): super(SortingEmulatedHelper, self).__init__(request, attr_info) self.sort_dict = get_sorts(request, attr_info) def update_fields(self, original_fields, fields_to_add): if not original_fields: return for key in 
dict(self.sort_dict).keys(): if key not in original_fields: original_fields.append(key) fields_to_add.append(key) def sort(self, items): def cmp_func(obj1, obj2): for key, direction in self.sort_dict: ret = cmp(obj1[key], obj2[key]) if ret: return ret * (1 if direction else -1) return 0 return sorted(items, cmp=cmp_func) class SortingNativeHelper(SortingHelper): def __init__(self, request, attr_info): self.sort_dict = get_sorts(request, attr_info) def update_args(self, args): args['sorts'] = self.sort_dict class NoSortingHelper(SortingHelper): pass class NeutronController(object): """Base controller class for Neutron API.""" # _resource_name will be redefined in sub concrete controller _resource_name = None def __init__(self, plugin): self._plugin = plugin super(NeutronController, self).__init__() def _prepare_request_body(self, body, params): """Verifies required parameters are in request body. Sets default value for missing optional parameters. Body argument must be the deserialized body. """ try: if body is None: # Initialize empty resource for setting default value body = {self._resource_name: {}} data = body[self._resource_name] except KeyError: # raise if _resource_name is not in req body. raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") % self._resource_name) for param in params: param_name = param['param-name'] param_value = data.get(param_name) # If the parameter wasn't found and it was required, return 400 if param_value is None and param['required']: msg = (_("Failed to parse request. " "Parameter '%s' not specified") % param_name) LOG.error(msg) raise exc.HTTPBadRequest(msg) data[param_name] = param_value or param.get('default-value') return body
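The PaginationEmulatedHelper above pages a result list by slicing around a marker id and then emits next/previous links. Below is a minimal, self-contained sketch of that marker-based slicing (plain dicts and made-up names instead of a webob request and oslo.config, so it illustrates the idea rather than Neutron's actual helper):

# Sketch of marker-based ("emulated") pagination over a list of dicts.
# Forward paging only; the helper classes above also handle page_reverse
# and link generation.
def paginate(items, limit, marker=None, key='id'):
    """Return the page of items that starts just after the item whose
    key equals marker (or the first page when no marker is given)."""
    if not limit:
        return items
    start = 0
    if marker is not None:
        for pos, item in enumerate(items):
            if item[key] == marker:
                start = pos + 1
                break
    return items[start:start + limit]


if __name__ == '__main__':
    ports = [{'id': n, 'name': 'port-%d' % n} for n in range(10)]
    print(paginate(ports, limit=3))            # ids 0, 1, 2
    print(paginate(ports, limit=3, marker=2))  # ids 3, 4, 5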
apache-2.0
davehorton/drachtio-server
deps/boost_1_69_0/tools/build/test/build_file.py
7
5123
#!/usr/bin/python # Copyright (C) 2006. Vladimir Prus # Copyright (C) 2008. Jurko Gospodnetic # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # Tests that we explicitly request a file (not target) to be built by # specifying its name on the command line. import BoostBuild ############################################################################### # # test_building_file_from_specific_project() # ------------------------------------------ # ############################################################################### def test_building_file_from_specific_project(): t = BoostBuild.Tester(use_test_config=False) t.write("jamroot.jam", """\ exe hello : hello.cpp ; exe hello2 : hello.cpp ; build-project sub ; """) t.write("hello.cpp", "int main() {}\n") t.write("sub/jamfile.jam", """ exe hello : hello.cpp ; exe hello2 : hello.cpp ; exe sub : hello.cpp ; """) t.write("sub/hello.cpp", "int main() {}\n") t.run_build_system(["sub", t.adjust_suffix("hello.obj")]) t.expect_output_lines("*depends on itself*", False) t.expect_addition("sub/bin/$toolset/debug*/hello.obj") t.expect_nothing_more() t.cleanup() ############################################################################### # # test_building_file_from_specific_target() # ----------------------------------------- # ############################################################################### def test_building_file_from_specific_target(): t = BoostBuild.Tester(use_test_config=False) t.write("jamroot.jam", """\ exe hello1 : hello1.cpp ; exe hello2 : hello2.cpp ; exe hello3 : hello3.cpp ; """) t.write("hello1.cpp", "int main() {}\n") t.write("hello2.cpp", "int main() {}\n") t.write("hello3.cpp", "int main() {}\n") t.run_build_system(["hello1", t.adjust_suffix("hello1.obj")]) t.expect_addition("bin/$toolset/debug*/hello1.obj") t.expect_nothing_more() t.cleanup() ############################################################################### # # test_building_missing_file_from_specific_target() # ------------------------------------------------- # ############################################################################### def test_building_missing_file_from_specific_target(): t = BoostBuild.Tester(use_test_config=False) t.write("jamroot.jam", """\ exe hello1 : hello1.cpp ; exe hello2 : hello2.cpp ; exe hello3 : hello3.cpp ; """) t.write("hello1.cpp", "int main() {}\n") t.write("hello2.cpp", "int main() {}\n") t.write("hello3.cpp", "int main() {}\n") obj = t.adjust_suffix("hello2.obj") t.run_build_system(["hello1", obj], status=1) t.expect_output_lines("don't know how to make*" + obj) t.expect_nothing_more() t.cleanup() ############################################################################### # # test_building_multiple_files_with_different_names() # --------------------------------------------------- # ############################################################################### def test_building_multiple_files_with_different_names(): t = BoostBuild.Tester(use_test_config=False) t.write("jamroot.jam", """\ exe hello1 : hello1.cpp ; exe hello2 : hello2.cpp ; exe hello3 : hello3.cpp ; """) t.write("hello1.cpp", "int main() {}\n") t.write("hello2.cpp", "int main() {}\n") t.write("hello3.cpp", "int main() {}\n") t.run_build_system([t.adjust_suffix("hello1.obj"), t.adjust_suffix( "hello2.obj")]) t.expect_addition("bin/$toolset/debug*/hello1.obj") t.expect_addition("bin/$toolset/debug*/hello2.obj") t.expect_nothing_more() t.cleanup() 
############################################################################### # # test_building_multiple_files_with_the_same_name() # ------------------------------------------------- # ############################################################################### def test_building_multiple_files_with_the_same_name(): t = BoostBuild.Tester(use_test_config=False) t.write("jamroot.jam", """\ exe hello : hello.cpp ; exe hello2 : hello.cpp ; build-project sub ; """) t.write("hello.cpp", "int main() {}\n") t.write("sub/jamfile.jam", """ exe hello : hello.cpp ; exe hello2 : hello.cpp ; exe sub : hello.cpp ; """) t.write("sub/hello.cpp", "int main() {}\n") t.run_build_system([t.adjust_suffix("hello.obj")]) t.expect_output_lines("*depends on itself*", False) t.expect_addition("bin/$toolset/debug*/hello.obj") t.expect_addition("sub/bin/$toolset/debug*/hello.obj") t.expect_nothing_more() t.cleanup() ############################################################################### # # main() # ------ # ############################################################################### test_building_file_from_specific_project() test_building_file_from_specific_target() test_building_missing_file_from_specific_target() test_building_multiple_files_with_different_names() test_building_multiple_files_with_the_same_name()
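All of the tests above follow the same Tester life cycle: write a jamroot and sources, run the build system with an explicit file name, then assert on the targets that appeared. A condensed sketch of that skeleton, using only Tester calls already shown above (the project and file names here are invented for illustration):

import BoostBuild

def test_building_object_file_by_name():
    t = BoostBuild.Tester(use_test_config=False)
    t.write("jamroot.jam", "exe app : app.cpp ;\n")
    t.write("app.cpp", "int main() {}\n")
    # Request the object file (not the exe target) by its adjusted file name.
    t.run_build_system([t.adjust_suffix("app.obj")])
    t.expect_addition("bin/$toolset/debug*/app.obj")
    t.expect_nothing_more()
    t.cleanup()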
mit
SmartCash/smartcash
qa/rpc-tests/test_framework/blockstore.py
97
5416
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # BlockStore: a helper class that keeps a map of blocks and implements # helper functions for responding to getheaders and getdata, # and for constructing a getheaders message # from .mininode import * from io import BytesIO import dbm.dumb as dbmd class BlockStore(object): def __init__(self, datadir): self.blockDB = dbmd.open(datadir + "/blocks", 'c') self.currentBlock = 0 self.headers_map = dict() def close(self): self.blockDB.close() def erase(self, blockhash): del self.blockDB[repr(blockhash)] # lookup an entry and return the item as raw bytes def get(self, blockhash): value = None try: value = self.blockDB[repr(blockhash)] except KeyError: return None return value # lookup an entry and return it as a CBlock def get_block(self, blockhash): ret = None serialized_block = self.get(blockhash) if serialized_block is not None: f = BytesIO(serialized_block) ret = CBlock() ret.deserialize(f) ret.calc_sha256() return ret def get_header(self, blockhash): try: return self.headers_map[blockhash] except KeyError: return None # Note: this pulls full blocks out of the database just to retrieve # the headers -- perhaps we could keep a separate data structure # to avoid this overhead. def headers_for(self, locator, hash_stop, current_tip=None): if current_tip is None: current_tip = self.currentBlock current_block_header = self.get_header(current_tip) if current_block_header is None: return None response = msg_headers() headersList = [ current_block_header ] maxheaders = 2000 while (headersList[0].sha256 not in locator.vHave): prevBlockHash = headersList[0].hashPrevBlock prevBlockHeader = self.get_header(prevBlockHash) if prevBlockHeader is not None: headersList.insert(0, prevBlockHeader) else: break headersList = headersList[:maxheaders] # truncate if we have too many hashList = [x.sha256 for x in headersList] index = len(headersList) if (hash_stop in hashList): index = hashList.index(hash_stop)+1 response.headers = headersList[:index] return response def add_block(self, block): block.calc_sha256() try: self.blockDB[repr(block.sha256)] = bytes(block.serialize()) except TypeError as e: print("Unexpected error: ", sys.exc_info()[0], e.args) self.currentBlock = block.sha256 self.headers_map[block.sha256] = CBlockHeader(block) def add_header(self, header): self.headers_map[header.sha256] = header # lookup the hashes in "inv", and return p2p messages for delivering # blocks found. 
def get_blocks(self, inv): responses = [] for i in inv: if (i.type == 2): # MSG_BLOCK data = self.get(i.hash) if data is not None: # Use msg_generic to avoid re-serialization responses.append(msg_generic(b"block", data)) return responses def get_locator(self, current_tip=None): if current_tip is None: current_tip = self.currentBlock r = [] counter = 0 step = 1 lastBlock = self.get_block(current_tip) while lastBlock is not None: r.append(lastBlock.hashPrevBlock) for i in range(step): lastBlock = self.get_block(lastBlock.hashPrevBlock) if lastBlock is None: break counter += 1 if counter > 10: step *= 2 locator = CBlockLocator() locator.vHave = r return locator class TxStore(object): def __init__(self, datadir): self.txDB = dbmd.open(datadir + "/transactions", 'c') def close(self): self.txDB.close() # lookup an entry and return the item as raw bytes def get(self, txhash): value = None try: value = self.txDB[repr(txhash)] except KeyError: return None return value def get_transaction(self, txhash): ret = None serialized_tx = self.get(txhash) if serialized_tx is not None: f = BytesIO(serialized_tx) ret = CTransaction() ret.deserialize(f) ret.calc_sha256() return ret def add_transaction(self, tx): tx.calc_sha256() try: self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) except TypeError as e: print("Unexpected error: ", sys.exc_info()[0], e.args) def get_transactions(self, inv): responses = [] for i in inv: if (i.type == 1): # MSG_TX tx = self.get(i.hash) if tx is not None: responses.append(msg_generic(b"tx", tx)) return responses
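get_locator above walks back from the tip, doubling its step after the first ten hashes so the locator stays short even for long chains. A self-contained sketch of that exponential back-off over a plain parent map (toy integer "hashes", no block database) illustrates the pattern:

# Toy version of the thinning locator walk, over a dict mapping
# block hash -> previous block hash.
def build_locator(tip, prev_of):
    locator = []
    counter = 0
    step = 1
    cur = tip
    while cur in prev_of:
        locator.append(prev_of[cur])
        for _ in range(step):
            cur = prev_of.get(cur)
            if cur is None:
                break
        counter += 1
        if counter > 10:
            step *= 2
    return locator


if __name__ == '__main__':
    # Hypothetical linear chain 0 <- 1 <- ... <- 99 (height used as the hash).
    prev_of = {h: h - 1 for h in range(1, 100)}
    print(build_locator(99, prev_of))  # dense near the tip, then thinning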
mit
nmayorov/scipy
scipy/stats/tests/test_rank.py
4
8291
import numpy as np from numpy.testing import assert_equal, assert_array_equal from scipy.stats import rankdata, tiecorrect import pytest class TestTieCorrect(object): def test_empty(self): """An empty array requires no correction, should return 1.0.""" ranks = np.array([], dtype=np.float64) c = tiecorrect(ranks) assert_equal(c, 1.0) def test_one(self): """A single element requires no correction, should return 1.0.""" ranks = np.array([1.0], dtype=np.float64) c = tiecorrect(ranks) assert_equal(c, 1.0) def test_no_correction(self): """Arrays with no ties require no correction.""" ranks = np.arange(2.0) c = tiecorrect(ranks) assert_equal(c, 1.0) ranks = np.arange(3.0) c = tiecorrect(ranks) assert_equal(c, 1.0) def test_basic(self): """Check a few basic examples of the tie correction factor.""" # One tie of two elements ranks = np.array([1.0, 2.5, 2.5]) c = tiecorrect(ranks) T = 2.0 N = ranks.size expected = 1.0 - (T**3 - T) / (N**3 - N) assert_equal(c, expected) # One tie of two elements (same as above, but tie is not at the end) ranks = np.array([1.5, 1.5, 3.0]) c = tiecorrect(ranks) T = 2.0 N = ranks.size expected = 1.0 - (T**3 - T) / (N**3 - N) assert_equal(c, expected) # One tie of three elements ranks = np.array([1.0, 3.0, 3.0, 3.0]) c = tiecorrect(ranks) T = 3.0 N = ranks.size expected = 1.0 - (T**3 - T) / (N**3 - N) assert_equal(c, expected) # Two ties, lengths 2 and 3. ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0]) c = tiecorrect(ranks) T1 = 2.0 T2 = 3.0 N = ranks.size expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N) assert_equal(c, expected) def test_overflow(self): ntie, k = 2000, 5 a = np.repeat(np.arange(k), ntie) n = a.size # ntie * k out = tiecorrect(rankdata(a)) assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n)) class TestRankData(object): def test_empty(self): """stats.rankdata([]) should return an empty array.""" a = np.array([], dtype=int) r = rankdata(a) assert_array_equal(r, np.array([], dtype=np.float64)) r = rankdata([]) assert_array_equal(r, np.array([], dtype=np.float64)) def test_one(self): """Check stats.rankdata with an array of length 1.""" data = [100] a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, np.array([1.0], dtype=np.float64)) r = rankdata(data) assert_array_equal(r, np.array([1.0], dtype=np.float64)) def test_basic(self): """Basic tests of stats.rankdata.""" data = [100, 10, 50] expected = np.array([3.0, 1.0, 2.0], dtype=np.float64) a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, expected) r = rankdata(data) assert_array_equal(r, expected) data = [40, 10, 30, 10, 50] expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64) a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, expected) r = rankdata(data) assert_array_equal(r, expected) data = [20, 20, 20, 10, 10, 10] expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64) a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, expected) r = rankdata(data) assert_array_equal(r, expected) # The docstring states explicitly that the argument is flattened. 
a2d = a.reshape(2, 3) r = rankdata(a2d) assert_array_equal(r, expected) def test_rankdata_object_string(self): min_rank = lambda a: [1 + sum(i < j for i in a) for j in a] max_rank = lambda a: [sum(i <= j for i in a) for j in a] ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)]) def average_rank(a): return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))] def dense_rank(a): b = np.unique(a) return [1 + sum(i < j for i in b) for j in a] rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank, average=average_rank, dense=dense_rank) def check_ranks(a): for method in 'min', 'max', 'dense', 'ordinal', 'average': out = rankdata(a, method=method) assert_array_equal(out, rankf[method](a)) val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz'] check_ranks(np.random.choice(val, 200)) check_ranks(np.random.choice(val, 200).astype('object')) val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object') check_ranks(np.random.choice(val, 200).astype('object')) def test_large_int(self): data = np.array([2**60, 2**60+1], dtype=np.uint64) r = rankdata(data) assert_array_equal(r, [1.0, 2.0]) data = np.array([2**60, 2**60+1], dtype=np.int64) r = rankdata(data) assert_array_equal(r, [1.0, 2.0]) data = np.array([2**60, -2**60+1], dtype=np.int64) r = rankdata(data) assert_array_equal(r, [2.0, 1.0]) def test_big_tie(self): for n in [10000, 100000, 1000000]: data = np.ones(n, dtype=int) r = rankdata(data) expected_rank = 0.5 * (n + 1) assert_array_equal(r, expected_rank * data, "test failed with n=%d" % n) def test_axis(self): data = [[0, 2, 1], [4, 2, 2]] expected0 = [[1., 1.5, 1.], [2., 1.5, 2.]] r0 = rankdata(data, axis=0) assert_array_equal(r0, expected0) expected1 = [[1., 3., 2.], [3., 1.5, 1.5]] r1 = rankdata(data, axis=1) assert_array_equal(r1, expected1) methods = ["average", "min", "max", "dense", "ordinal"] dtypes = [np.float64] + [np.int_]*4 @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("method, dtype", zip(methods, dtypes)) def test_size_0_axis(self, axis, method, dtype): shape = (3, 0) data = np.zeros(shape) r = rankdata(data, method=method, axis=axis) assert_equal(r.shape, shape) assert_equal(r.dtype, dtype) _cases = ( # values, method, expected ([], 'average', []), ([], 'min', []), ([], 'max', []), ([], 'dense', []), ([], 'ordinal', []), # ([100], 'average', [1.0]), ([100], 'min', [1.0]), ([100], 'max', [1.0]), ([100], 'dense', [1.0]), ([100], 'ordinal', [1.0]), # ([100, 100, 100], 'average', [2.0, 2.0, 2.0]), ([100, 100, 100], 'min', [1.0, 1.0, 1.0]), ([100, 100, 100], 'max', [3.0, 3.0, 3.0]), ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]), ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]), # ([100, 300, 200], 'average', [1.0, 3.0, 2.0]), ([100, 300, 200], 'min', [1.0, 3.0, 2.0]), ([100, 300, 200], 'max', [1.0, 3.0, 2.0]), ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]), ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]), # ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]), ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]), ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]), ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]), ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]), # ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]), ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]), ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]), ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]), ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]), # ([10] * 30, 'ordinal', 
np.arange(1.0, 31.0)), ) def test_cases(): for values, method, expected in _cases: r = rankdata(values, method=method) assert_array_equal(r, expected)
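The _cases table above is the ground truth for rankdata's five tie-breaking methods. The same values can be checked interactively with a few lines of scipy (printed dtypes vary by scipy version, but the rank values match the table):

from scipy.stats import rankdata, tiecorrect

values = [100, 200, 300, 200]
for method in ("average", "min", "max", "dense", "ordinal"):
    print(method, rankdata(values, method=method))
# average -> 1.0, 2.5, 4.0, 2.5
# min     -> 1.0, 2.0, 4.0, 2.0
# max     -> 1.0, 3.0, 4.0, 3.0
# dense   -> 1.0, 2.0, 3.0, 2.0
# ordinal -> 1.0, 2.0, 4.0, 3.0

# tiecorrect works on the ranks: one tie of length 2 among four values
# gives 1 - (2**3 - 2) / (4**3 - 4) = 0.9
print(tiecorrect(rankdata(values)))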
bsd-3-clause
enStratus/unix-agent
src/dcm/agent/messaging/reply.py
3
40292
# # Copyright (C) 2014 Dell, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import os import threading import signal import sys import dcm.agent.exceptions as exceptions import dcm.agent.logger as dcm_logger import dcm.agent.messaging.states as states import dcm.agent.messaging.types as message_types import dcm.agent.messaging.utils as utils import dcm.agent.events.state_machine as state_machine import dcm.agent.utils as agent_util import dcm.eventlog.tracer as tracer from dcm.agent.events.globals import global_space as dcm_events _g_logger = logging.getLogger(__name__) class ReplyRPC(object): MISSING_VALUE_STRING = "DEADBEEF" def __init__(self, reply_listener, agent_id, connection, request_id, request_document, db, timeout=1.0, reply_doc=None, start_state=states.ReplyStates.REQUESTING): self._agent_id = agent_id self._request_id = request_id self._request_document = request_document self._cancel_callback = None self._cancel_callback_args = None self._cancel_callback_kwargs = None self._reply_message_timer = None self._reply_listener = reply_listener self._timeout = timeout self._conn = connection self._resend_reply_cnt = 0 self._resend_reply_cnt_threshold = 5 self._lock = threading.RLock() self._response_doc = reply_doc self._sm = state_machine.StateMachine(start_state) self._setup_states() self._db = db def get_request_id(self): return self._request_id def lock(self): self._lock.acquire() def unlock(self): self._lock.release() def get_message_payload(self): return self._request_document["payload"] def shutdown(self): with tracer.RequestTracer(self._request_id): try: if self._reply_message_timer: self._reply_message_timer.cancel() self._reply_listener.message_done(self) except Exception as ex: _g_logger.warn("Error shutting down the request", ex) def kill(self): with tracer.RequestTracer(self._request_id): if self._reply_message_timer: try: self._reply_message_timer.cancel() except Exception as ex: _g_logger.info("an exception occurred when trying to " "cancel the timer: " + str(ex)) @agent_util.class_method_sync def ack(self, cancel_callback, cancel_callback_args, cancel_callback_kwargs): """ Indicate to the messaging system that you have successfully received this message and stored it for processing. """ with tracer.RequestTracer(self._request_id): self._cancel_callback = cancel_callback self._cancel_callback_args = cancel_callback_args if self._cancel_callback_args is None: self._cancel_callback_args = [] self._cancel_callback_args.insert(0, self) self._cancel_callback_kwargs = cancel_callback_kwargs self._sm.event_occurred(states.ReplyEvents.USER_ACCEPTS_REQUEST, message={}) @agent_util.class_method_sync def nak(self, response_document): """ This function is called to out right reject the message. The user is signifying that this message will not be processed at all. A call to this function signifies that this object will no longer be referenced by the user. 
""" with tracer.RequestTracer(self._request_id): self._sm.event_occurred(states.ReplyEvents.USER_REJECTS_REQUEST, message=response_document) @agent_util.class_method_sync def reply(self, response_document): """ Send a reply to this request. This signifies that the user is done with this object. """ with tracer.RequestTracer(self._request_id): _g_logger.debug("reply() has been called") self._sm.event_occurred(states.ReplyEvents.USER_REPLIES, message=response_document) @agent_util.class_method_sync def reply_timeout(self, message_timer): with tracer.RequestTracer(self._request_id): _g_logger.debug("reply timeout occurred, resending.") self._sm.event_occurred(states.RequesterEvents.TIMEOUT, message_timer=message_timer) @agent_util.class_method_sync def incoming_message(self, json_doc): with tracer.RequestTracer(self._request_id): type_to_event = { message_types.MessageTypes.ACK: states.ReplyEvents.REPLY_ACK_RECEIVED, message_types.MessageTypes.NACK: states.ReplyEvents.REPLY_NACK_RECEIVED, message_types.MessageTypes.CANCEL: states.ReplyEvents.CANCEL_RECEIVED, message_types.MessageTypes.STATUS: states.ReplyEvents.STATUS_RECEIVED, message_types.MessageTypes.REQUEST: states.ReplyEvents.REQUEST_RECEIVED } if 'type' not in json_doc: raise exceptions.MissingMessageParameterException('type') if json_doc['type'] not in type_to_event: raise exceptions.InvalidMessageParameterValueException( 'type', json_doc['type']) # this next call drives the state machine self._sm.event_occurred(type_to_event[json_doc['type']], message=json_doc) def _send_reply_message(self, message_timer): self._reply_message_timer = message_timer message_timer.send(self._conn) ################################################################### # state machine event handlers # ever method that starts with _sm_ is called under the same lock. ################################################################### def _sm_initial_request_received(self, **kwargs): """ This is the initial request, we simply set this to the requesting state. """ pass def _sm_requesting_retransmission_received(self, **kwargs): """ After receiving an initial request we receive a retransmission of it. The user has not yet acked the message but they have been notified that the message exists. In this case we do nothing but wait for the user to ack the message """ pass def _sm_requesting_cancel_received(self, **kwargs): """ A cancel message flows over the wire after the request is received but before it is acknowledged. Here we will tell the user about the cancel. It is important that the cancel notification comes after the message received notification. """ dcm_events.register_callback( self._cancel_callback, args=self._cancel_callback_args, kwargs=self._cancel_callback_kwargs) def _sm_requesting_user_accepts(self, **kwargs): """ The user decided to accept the message. Here we will send the ack """ self._db.new_record(self._request_id, self._request_document, None, states.ReplyStates.ACKED, self._agent_id) ack_doc = {'type': message_types.MessageTypes.ACK, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'entity': "user_accepts", 'agent_id': self._agent_id} self._conn.send(ack_doc) def _sm_requesting_user_replies(self, **kwargs): """ The user decides to reply before acknowledging the message. 
Therefore we just send the reply and it acts as the ack and the reply """ self._response_doc = kwargs['message'] self._db.update_record(self._request_id, states.ReplyStates.REPLY, reply_doc=self._response_doc) reply_doc = {'type': message_types.MessageTypes.REPLY, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'payload': self._response_doc, 'entity': "user_replies", 'agent_id': self._agent_id} message_timer = utils.MessageTimer(self._timeout, self.reply_timeout, reply_doc) self._send_reply_message(message_timer) def _sm_requesting_user_rejects(self, **kwargs): """ The user decides to reject the incoming request so we must send a nack to the remote side. """ self._db.new_record(self._request_id, self._request_document, None, states.ReplyStates.ACKED, self._agent_id) nack_doc = {'type': message_types.MessageTypes.NACK, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'entity': "user_rejects", 'error_message': "The agent rejected the request.", 'agent_id': self._agent_id} self._conn.send(nack_doc) def _sm_acked_request_received(self, **kwargs): """ In this case a retransmission of the request comes in after the user acknowledged the message. Here we resend the ack. """ # reply using the latest message id ack_doc = {'type': message_types.MessageTypes.ACK, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'entity': "request_received", 'agent_id': self._agent_id} self._conn.send(ack_doc) def _sm_acked_cancel_received(self, **kwargs): """ A cancel is received from the remote end. We simply notify the user of the request and allow the user to act upon it. """ dcm_events.register_callback( self._cancel_callback, args=self._cancel_callback_args, kwargs=self._cancel_callback_kwargs) def _sm_acked_reply(self, **kwargs): """ This is the standard case. A user has accepted the message and is now replying to it. We send the reply. """ self._response_doc = kwargs['message'] self._db.update_record(self._request_id, states.ReplyStates.REPLY, reply_doc=self._response_doc) reply_doc = {'type': message_types.MessageTypes.REPLY, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'payload': self._response_doc, 'entity': "acked_reply", 'agent_id': self._agent_id} message_timer = utils.MessageTimer(self._timeout, self.reply_timeout, reply_doc) self._send_reply_message(message_timer) def _sm_acked_re_reply(self, **kwargs): self._db.update_record(self._request_id, states.ReplyStates.REPLY, reply_doc=self._response_doc) reply_doc = {'type': message_types.MessageTypes.REPLY, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'payload': self._response_doc, 'entity': "acked_reply", 'agent_id': self._agent_id} message_timer = utils.MessageTimer(self._timeout, self.reply_timeout, reply_doc) self._send_reply_message(message_timer) def _sm_reply_request_retrans(self, **kwargs): """ After replying to a message we receive a retransmission of the original request. This can happen if the remote end never receives an ack and the reply message is either lost or delayed. Here we retransmit the reply. 
""" reply_doc = {'type': message_types.MessageTypes.REPLY, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'payload': self._response_doc, 'entity': "request_retrans", 'agent_id': self._agent_id} message_timer = utils.MessageTimer(self._timeout, self.reply_timeout, reply_doc) self._send_reply_message(message_timer) def _sm_reply_cancel_received(self, **kwargs): """ This occurs when a cancel is received after a reply is sent. It can happen if the remote end sends a cancel before the reply is received. Because we have already finished with this request we simply ignore this message. """ pass def _sm_reply_ack_received(self, **kwargs): """ This is the standard case. A reply is sent and the ack to that reply is received. At this point we know that the RPC was successful. """ self._db.update_record(self._request_id, states.ReplyStates.REPLY_ACKED) self._reply_message_timer.cancel() self._reply_message_timer = None self._reply_listener.message_done(self) _g_logger.debug("Messaging complete. State event transition: " + str(self._sm.get_event_list())) def _sm_reply_nack_received(self, **kwargs): """ The reply was nacked. This is probably a result of the a retransmission that was not needed. """ self._db.update_record(self._request_id, states.ReplyStates.REPLY_NACKED) self._reply_message_timer.cancel() self._reply_message_timer = None self._reply_listener.message_done(self) _g_logger.debug("Reply NACKed, messaging complete. State event " "transition: " + str(self._sm.get_event_list())) def _sm_reply_ack_timeout(self, **kwargs): """ This happens when after a given amount of time an ack has still not been received. We thus must re-send the reply. """ message_timer = kwargs['message_timer'] # The time out did occur before the message could be acked so we must # resend it _g_logger.info("Resending reply") self._resend_reply_cnt += 1 if self._resend_reply_cnt > self._resend_reply_cnt_threshold: # TODO punt at some point ? pass self._send_reply_message(message_timer) def _sm_nacked_request_received(self, **kwargs): """ This happens when a request is received after it has been nacked. This will occur if the first nack is lost or delayed. We retransmit the nack """ nack_doc = {'type': message_types.MessageTypes.NACK, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'entity': "request_received", 'error_message': "The agent already rejected this request", 'agent_id': self._agent_id} self._conn.send(nack_doc) def _sm_cancel_waiting_ack(self, **kwargs): """ If a cancel is received while in the requesting state we must make sure that the user does not get the cancel callback until after they have acked the message. This handler occurs when the user calls ack() after a cancel has arrived. Here we just register a cancel callback and let the user react to it how they will. 
""" dcm_events.register_user_callback( self._cancel_callback, args=self._cancel_callback_args, kwargs=self._cancel_callback_kwargs) def _sm_send_status(self): status_doc = {'type': message_types.MessageTypes.STATUS, 'message_id': utils.new_message_id(), 'request_id': self._request_id, 'entity': "status send", 'agent_id': self._agent_id, 'state': self._sm._current_state, 'reply': self._response_doc} self._conn.send(status_doc) def _sm_reinflated_reply_ack(self): _g_logger.warn("The agent manager sent a message for this request " "after it was in the REPLY_ACK state") def _sm_reinflated_reply_nack(self): _g_logger.warn("The agent manager sent a message for this request " "after it was in the REPLY_NACK state") def _reinflate_done(self): if self._reply_message_timer: self._reply_message_timer.cancel() self._reply_message_timer = None self._reply_listener.message_done(self) def _sm_reply_ack_re_acked(self, message=None): """ This is called when a re-inflated state had already been reply acked, and is now acked again. We just take it out of memory. """ self._reinflate_done() def _sm_reply_ack_now_nacked(self, message=None): """ This is called whenever a re-inflated command reaches a terminal state that was """ self._reinflate_done() def _sm_reply_nack_re_nacked(self, message=None): """ This is called when a re-inflated state had already been reply nacked, and is now nacked again. We just take it out of memory. """ self._reinflate_done() def _sm_reply_nack_now_acked(self, message=None): """ This is called whenever a re-inflated command reaches acked state but it was previously nacked """ self._reinflate_done() def _sm_ack_reply_nack_received(self, message=None): _g_logger.warn("A NACK was received when in the ACK state " + str(message)) # this will be cleaned up when the command replies, which it is # required to do def _sm_replied_nacked_reply(self, message=None): """ This is called when a request was received but the ACK for that request received a NACK. However the command finished running and a reply was sent back. 
Here we cancel the message and log the event """ _g_logger.warn("A command that was already finished ended " + str(message)) self.shutdown() def _setup_states(self): self._sm.add_transition(states.ReplyStates.NEW, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.REQUESTING, self._sm_initial_request_received) self._sm.add_transition(states.ReplyStates.REQUESTING, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.REQUESTING, self._sm_requesting_retransmission_received) self._sm.add_transition(states.ReplyStates.REQUESTING, states.ReplyEvents.CANCEL_RECEIVED, states.ReplyStates.CANCEL_RECEIVED_REQUESTING, self._sm_requesting_cancel_received) self._sm.add_transition(states.ReplyStates.REQUESTING, states.ReplyEvents.USER_ACCEPTS_REQUEST, states.ReplyStates.ACKED, self._sm_requesting_user_accepts) self._sm.add_transition(states.ReplyStates.REQUESTING, states.ReplyEvents.USER_REPLIES, states.ReplyStates.REPLY, self._sm_requesting_user_replies) self._sm.add_transition(states.ReplyStates.REQUESTING, states.ReplyEvents.USER_REJECTS_REQUEST, states.ReplyStates.NACKED, self._sm_requesting_user_rejects) self._sm.add_transition(states.ReplyStates.REQUESTING, states.ReplyEvents.STATUS_RECEIVED, states.ReplyStates.REQUESTING, self._sm_send_status) self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.CANCEL_RECEIVED_REQUESTING, self._sm_requesting_retransmission_received) self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING, states.ReplyEvents.CANCEL_RECEIVED, states.ReplyStates.CANCEL_RECEIVED_REQUESTING, None) self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING, states.ReplyEvents.USER_ACCEPTS_REQUEST, states.ReplyStates.ACKED, self._sm_cancel_waiting_ack) self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING, states.ReplyEvents.USER_REPLIES, states.ReplyStates.REPLY, self._sm_requesting_user_replies) self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING, states.ReplyEvents.USER_REJECTS_REQUEST, states.ReplyStates.NACKED, self._sm_requesting_user_rejects) self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING, states.ReplyEvents.STATUS_RECEIVED, states.ReplyStates.CANCEL_RECEIVED_REQUESTING, self._sm_send_status) self._sm.add_transition(states.ReplyStates.ACKED, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.ACKED, self._sm_acked_request_received) self._sm.add_transition(states.ReplyStates.ACKED, states.ReplyEvents.CANCEL_RECEIVED, states.ReplyStates.ACKED, self._sm_acked_cancel_received) self._sm.add_transition(states.ReplyStates.ACKED, states.ReplyEvents.USER_REPLIES, states.ReplyStates.REPLY, self._sm_acked_reply) self._sm.add_transition(states.ReplyStates.ACKED, states.ReplyEvents.STATUS_RECEIVED, states.ReplyStates.ACKED, self._sm_send_status) # if the AM receives and ACK but has never heard of the request ID # it will send a nack. this should not happen in a normal course # of events. At this point we should just kill the request and # log a scary message. We also need to kill anything running for that # that request # This will happen when the agent manager quits on a request before # the agent sends the ack. 
when the AM receives the ack it has already # canceled the request and thus NACKs the ACK self._sm.add_transition(states.ReplyStates.ACKED, states.ReplyEvents.REPLY_NACK_RECEIVED, states.ReplyStates.REPLY_NACKED, self._sm_ack_reply_nack_received) # note, eventually we will want to reply retrans logic to just punt self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.REPLY, self._sm_reply_request_retrans) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.USER_REPLIES, states.ReplyStates.REPLY, self._sm_acked_reply) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.CANCEL_RECEIVED, states.ReplyStates.REPLY, self._sm_reply_cancel_received) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.REPLY_ACK_RECEIVED, states.ReplyStates.REPLY_ACKED, self._sm_reply_ack_received) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.TIMEOUT, states.ReplyStates.REPLY, self._sm_reply_ack_timeout) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.REPLY_NACK_RECEIVED, states.ReplyStates.REPLY_NACKED, self._sm_reply_nack_received) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.STATUS_RECEIVED, states.ReplyStates.REPLY, self._sm_send_status) self._sm.add_transition(states.ReplyStates.REPLY, states.ReplyEvents.DB_INFLATE, states.ReplyStates.REPLY, self._sm_acked_re_reply) self._sm.add_transition(states.ReplyStates.NACKED, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.NACKED, self._sm_nacked_request_received) self._sm.add_transition(states.ReplyStates.NACKED, states.ReplyEvents.STATUS_RECEIVED, states.ReplyStates.NACKED, self._sm_send_status) self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.REPLY_ACKED, self._sm_reply_request_retrans) self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.REPLY_ACK_RECEIVED, states.ReplyStates.REPLY_ACKED, self._sm_reply_ack_re_acked) self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.REPLY_NACK_RECEIVED, states.ReplyStates.REPLY_ACKED, self._sm_reply_ack_now_nacked) self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.CANCEL_RECEIVED, states.ReplyStates.REPLY_ACKED, None) self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.STATUS_RECEIVED, states.ReplyStates.REPLY_ACKED, self._sm_send_status) self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.TIMEOUT, states.ReplyStates.REPLY_ACKED, None) # this transition should only occur when the AM makes a mistake # or messages are received out of order. 
self._sm.add_transition(states.ReplyStates.REPLY_ACKED, states.ReplyEvents.DB_INFLATE, states.ReplyStates.REPLY_ACKED, self._sm_reinflated_reply_ack) self._sm.add_transition(states.ReplyStates.REPLY_NACKED, states.ReplyEvents.REQUEST_RECEIVED, states.ReplyStates.REPLY_NACKED, self._sm_reply_request_retrans) self._sm.add_transition(states.ReplyStates.REPLY_NACKED, states.ReplyEvents.REPLY_ACK_RECEIVED, states.ReplyStates.REPLY_NACKED, self._sm_reply_nack_re_nacked) self._sm.add_transition(states.ReplyStates.REPLY_NACKED, states.ReplyEvents.REPLY_NACK_RECEIVED, states.ReplyStates.REPLY_NACKED, self._sm_reply_nack_now_acked) self._sm.add_transition(states.ReplyStates.REPLY_NACKED, states.ReplyEvents.CANCEL_RECEIVED, states.ReplyStates.REPLY_NACKED, None) # this will happen when the plugin finishes and thus replies # to a request that had its ACK NACKed. In this case we # just cancel the messaging and log a message self._sm.add_transition(states.ReplyStates.REPLY_NACKED, states.ReplyEvents.USER_REPLIES, states.ReplyStates.REPLY_NACKED, self._sm_replied_nacked_reply) # this next state should only occur when a message is out # of order or the agent manager made a mistake self._sm.add_transition(states.ReplyStates.REPLY_NACKED, states.ReplyEvents.DB_INFLATE, states.ReplyStates.REPLY_NACKED, self._sm_reinflated_reply_ack) class RequestListener(object): def __init__(self, conf, sender_connection, dispatcher, db, id_system=None): self._conn = sender_connection self._dispatcher = dispatcher self._requests = {} self._messages_processed = 0 self._reply_observers = [] self._timeout = conf.messaging_retransmission_timeout self._shutdown = False self._conf = conf self._db = db self._id_system = id_system self._lock = threading.RLock() self._db.starting_agent() def get_reply_observers(self): # get the whole list so that the user can add and remove themselves. # This sort of thing should be done only with carefully writen code # using carefully writen observers that do very light weight # nonblocking operations return self._reply_observers def _call_reply_observers(self, func_name, argument): for o in self._reply_observers: try: func = getattr(o, func_name) func(argument) except: _g_logger.exception("A bad observer threw an exception.") # dont let some crappy observer ruin everything pass def _process_doc(self, incoming_doc): if incoming_doc is None: return with tracer.RequestTracer(incoming_doc['request_id']): self._call_reply_observers("incoming_message", incoming_doc) _g_logger.debug("New message type %s" % incoming_doc['type']) # if the agent is misbehaving the AM might tell it to kill itself. # cold. if incoming_doc["type"] == message_types.MessageTypes.HEMLOCK: _g_logger.error("HEMLOCK: DCM told me to kill myself.") os.killpg(0, signal.SIGKILL) sys.exit(10) # if it is a alert message short circuit if incoming_doc["type"] == message_types.MessageTypes.ALERT_ACK: if self._id_system: self._id_system.incoming_message(incoming_doc) return request_id = incoming_doc["request_id"] # is this request already in memory? if request_id in self._requests: _g_logger.debug("The message was found in the requests.") # send it through, state machine will deal with it req = self._requests[request_id] req.incoming_message(incoming_doc) return # if the request id has already been seen by the database db_record = self._db.lookup_req(request_id) if db_record: _g_logger.info("Inflating the record from the DB." 
+ request_id) req = ReplyRPC( self, self._conf.agent_id, self._conn, request_id, incoming_doc, self._db, timeout=self._timeout, reply_doc=db_record.reply_doc, start_state=db_record.state) # this will probably be used in the near future so get it # on the memory list self._requests[request_id] = req req.incoming_message(incoming_doc) return if incoming_doc["type"] == message_types.MessageTypes.REQUEST: if len(list(self._requests.keys())) >=\ self._conf.messaging_max_at_once > -1: # short circuit the case where the agent is too busy dcm_logger.log_to_dcm_console_overloaded( msg="The new request was rejected because the agent has too many outstanding requests.") nack_doc = { 'type': message_types.MessageTypes.NACK, 'message_id': utils.new_message_id(), 'request_id': request_id, 'agent_id': self._conf.agent_id, 'error_message': ("The agent can only handle %d " "commands at once" % self._conf.messaging_max_at_once)} self._conn.send(nack_doc) return _g_logger.debug("A new request has come in.") req = ReplyRPC( self, self._conf.agent_id, self._conn, request_id, incoming_doc, self._db, timeout=self._timeout) self._call_reply_observers("new_message", req) # only add the message if processing was successful self._requests[request_id] = req try: self._dispatcher.incoming_request(req) except Exception: _g_logger.exception("The dispatcher could not handle a " "message.") del self._requests[request_id] dcm_logger.log_to_dcm_console_messaging_error( msg="The dispatcher could not handle the message.") raise else: # if we have never heard of the ID and this is not a new # request we return a courtesy error _g_logger.debug("Unknown message ID sending a NACK") nack_doc = {'type': message_types.MessageTypes.NACK, 'message_id': utils.new_message_id(), 'request_id': request_id, 'agent_id': self._conf.agent_id, 'error_message': "%s is an unknown ID" % request_id} self._conn.send(nack_doc) def _validate_doc(self, incoming_doc): pass def _send_bad_message_reply(self, incoming_doc, message): _g_logger.debug("Sending the bad message %s" % message) # we want to send a NACK to the message however it may be an error # because it was not formed with message_id or request_id. In this # case we will send values in that place indicating that *a* message # was bad. There will be almost no way for the sender to know which # one try: request_id = incoming_doc['request_id'] except KeyError: request_id = ReplyRPC.MISSING_VALUE_STRING nack_doc = {'type': message_types.MessageTypes.NACK, 'message_id': utils.new_message_id(), 'request_id': request_id, 'error_message': message, 'agent_id': self._conf.agent_id} self._conn.send(nack_doc) def message_done(self, reply_message): self._lock.acquire() try: request_id = reply_message.get_request_id() del self._requests[request_id] _g_logger.debug("The message %s has completed and is being " "removed" % request_id) self._messages_processed += 1 finally: self._lock.release() self._call_reply_observers("message_done", reply_message) def get_messages_processed(self): return self._messages_processed def is_busy(self): return len(self._requests) != 0 def shutdown(self): """ Stop accepting new requests but allow for outstanding messages to complete. """ self._shutdown = True for req in list(self._requests.values()): req.kill() def wait_for_all_nicely(self): # XXX TODO how long should this block? do we need this? 
# looks like just for tests while self._requests: dcm_events.poll() def reply(self, request_id, reply_doc): reply_req = self._requests[request_id] reply_req.reply(reply_doc) def incoming_parent_q_message(self, incoming_doc): _g_logger.debug("Received message %s" % str(incoming_doc)) try: self._validate_doc(incoming_doc) self._process_doc(incoming_doc) except Exception as ex: _g_logger.exception( "Error processing the message: %s" % str(incoming_doc)) self._send_bad_message_reply(incoming_doc, str(ex)) class ReplyObserverInterface(object): @agent_util.not_implemented_decorator def new_message(self, reply): pass @agent_util.not_implemented_decorator def message_done(self, reply): pass @agent_util.not_implemented_decorator def incoming_message(self, incoming_doc): pass
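ReplyRPC above is driven entirely by the add_transition table wired up in _setup_states: each (state, event) pair maps to a next state plus an optional handler. A toy stand-in for that machinery (not dcm's actual StateMachine class) shows the lookup-and-dispatch pattern in isolation:

# Toy (state, event) -> (next_state, handler) machine, mirroring how
# ReplyRPC wires states.ReplyStates / states.ReplyEvents above.
class TinyStateMachine(object):
    def __init__(self, start_state):
        self._state = start_state
        self._transitions = {}

    def add_transition(self, state, event, next_state, handler):
        self._transitions[(state, event)] = (next_state, handler)

    def event_occurred(self, event, **kwargs):
        next_state, handler = self._transitions[(self._state, event)]
        if handler is not None:
            handler(**kwargs)
        self._state = next_state


sm = TinyStateMachine("NEW")
sm.add_transition("NEW", "REQUEST_RECEIVED", "REQUESTING",
                  lambda **kw: print("got request", kw.get("message")))
sm.add_transition("REQUESTING", "USER_ACCEPTS_REQUEST", "ACKED",
                  lambda **kw: print("sending ack"))
sm.event_occurred("REQUEST_RECEIVED", message={"type": "REQUEST"})
sm.event_occurred("USER_ACCEPTS_REQUEST")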
apache-2.0
enikesha/ga-bitbot
libs/call_metrics.py
18
2700
""" call_metrics v0.01 a class function decorator which collects metrics (number of calls and total execution time) Copyright 2012 Brian Monkaba This file is part of ga-bitbot. ga-bitbot is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ga-bitbot is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>. """ from functools import wraps import time _metrics = {} #decorator which can be used on class methods #inserts a _metrics dictionary into the object def class_call_metrics(func): @wraps(func) def _decorator(self, *args, **kwargs): if not hasattr(self, '_metrics'): self._metrics = {} start = time.time() result = func(self, *args, **kwargs) finish = time.time() if not self._metrics.has_key(func.__name__): self._metrics.update({func.__name__:{'total_time':0,'calls':0}}) self._metrics[func.__name__]['total_time'] += finish - start self._metrics[func.__name__]['calls'] += 1 return result return _decorator #decorator which can be used on funcitons #uses the global _metrics dictionary def call_metrics(func): @wraps(func) def _decorator(*args, **kwargs): start = time.time() result = func(*args, **kwargs) finish = time.time() if not _metrics.has_key(func.__name__): _metrics.update({func.__name__:{'total_time':0,'calls':0}}) _metrics[func.__name__]['total_time'] += finish - start _metrics[func.__name__]['calls'] += 1 return result return _decorator def get_metrics(): return _metrics if __name__ == '__main__': class test(): @class_call_metrics def test(self,data): """test method doc string""" z = 0 for i in range(9999): for x in range(9999): z += 1 print "data:",data return 1 @call_metrics def function_test(data): print "data",data return 2 t = test() for i in range(10): print t.test(i) print t._metrics print t.test.__doc__ print function_test('funciton test input') print get_metrics()
gpl-3.0
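The call_metrics decorators in libs/call_metrics.py above time every call with time.time() and tally {'total_time', 'calls'} per function name, using Python 2 idioms such as dict.has_key. A Python 3 flavored sketch of the same idea, offered as an illustration rather than as part of ga-bitbot:

import time
from functools import wraps

_metrics = {}

def call_metrics(func):
    @wraps(func)
    def _decorator(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        entry = _metrics.setdefault(func.__name__, {'total_time': 0.0, 'calls': 0})
        entry['total_time'] += elapsed
        entry['calls'] += 1
        return result
    return _decorator

@call_metrics
def busy_loop(n):
    return sum(range(n))

busy_loop(10 ** 6)
busy_loop(10 ** 6)
print(_metrics)  # e.g. {'busy_loop': {'total_time': ..., 'calls': 2}}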
nyddle/toster
core/views.py
1
3655
from django.shortcuts import render, redirect, HttpResponse, get_object_or_404 from django.http import Http404 from django.http import HttpResponseNotFound from django.views.generic import View from django.views.generic import ListView from django.views.generic.edit import FormView, ProcessFormView, CreateView from rest_framework import viewsets from .forms import AskQuestionForm from .models import Question, MyUser from .serializers import QuestionSerializer, MyUserSerializer from taggit.models import Tag from bookmarks.handlers import library from bookmarks.models import Bookmark from django.contrib.auth import get_user_model as user_model from actstream import action from actstream.models import model_stream, any_stream class FeedView(View): def get(self, request): return render(request, 'core/feed.html', { 'stream' : any_stream(request.user) }) class QuestionView(View): model = Question def get(self, request, questionid, slug): try: question = Question.objects.get(pk=questionid) except Question.DoesNotExist: raise Http404 question.views += 1 question.save() return render(request, 'core/question.html', {'question': question}) class MyUserView(View): model = MyUser def get(self, request, username): try: user = MyUser.objects.get(username=username) except MyUser.DoesNotExist: raise Http404 return render(request, 'core/user.html', {'user': user}) #TODO: merge with popular view class QuestionListView(ListView): model = Question queryset = Question.objects.order_by('-pub_date') def get_queryset(self): queryset = super(QuestionListView, self).get_queryset() if 'tag' in self.kwargs: tag = self.kwargs['tag'] return queryset.filter(tags__name__in=[tag,]) q = self.request.GET.get("q") if q: return queryset.filter(question__icontains=q) return queryset class PopularQuestionListView(ListView): model = Question queryset = Question.objects.order_by('-rating') class MyUserListView(ListView): model = MyUser class MyUserQuestionListView(ListView): model = Question def get_queryset(self): self.author = get_object_or_404(MyUser, name=self.kwargs['username']) return Question.objects.filter(author=self.author) class QuestionViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = Question.objects.all() serializer_class = QuestionSerializer class MyUserViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = MyUser.objects.all() serializer_class = MyUserSerializer class HomeView(View): def get(self, request): return render(request, 'base.html') class AskQuestionView(FormView): template_name = 'core/new_question.html' form_class = AskQuestionForm success_url = '/questions' def form_valid(self, form): form.save() return super(AskQuestionView, self).form_valid(form) class TagListView(ListView): model = Tag #TODO: MembersView? class Members(View): def get(self, request): print('user', request.user) print('session', request.session.keys()) return render(request, 'core/member.html') class BookmarksView(View): def get(self, request): bookmarks = Bookmark.objects.filter_with_contents(user=self.request.user) return render(request, 'core/bookmark_list.html', { 'bookmarks' : bookmarks })
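QuestionListView above narrows its queryset either by a tag from the URL or by a ?q= search parameter. A minimal sketch of that get_queryset pattern follows; the model and field names are taken from the file above, but the view name is invented and the snippet still assumes a configured Django project with this app installed:

from django.views.generic import ListView

from .models import Question  # the Question model used by the views above


class QuestionSearchView(ListView):
    model = Question
    queryset = Question.objects.order_by('-pub_date')

    def get_queryset(self):
        queryset = super(QuestionSearchView, self).get_queryset()
        q = self.request.GET.get('q')
        if q:
            # Same icontains filter over the question text as QuestionListView.
            queryset = queryset.filter(question__icontains=q)
        return queryset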
mit
BIDS-collaborative/cega-trace
webscrap/crossref JSON/doi/doi_to_JSON.py
2
1389
import re
import urllib
import urllib.request
import json
import sys
import codecs


def search_doi(s):
    url = "http://api.crossref.org/works/" + s
    with urllib.request.urlopen(url) as htmlfile:
        htmltext = htmlfile.read().decode('utf-8')
    curdata = json.loads(htmltext)
    print(htmltext)
    return curdata


def decode(parse_file):
    with codecs.open(parse_file, 'r+', encoding='utf-8', errors='ignore') as txt_file:
        txt = txt_file.readlines()
    return txt


def main():
    data_ref = []
    # Get bibliometric data for all the references using DOI search via Crossref.
    for i in range(0, 568):
        try:
            name = (str(i) + 'doi.txt')
            data = open(name, 'r')
            if data:
                my_list = data
                for line in my_list:
                    print('reading:' + str(i) + 'doi.txt')
                    cur_data = search_doi(line)
                    cur_data["ID"] = str(i)
                    data_ref.append(cur_data)
                data.close()
                # Each time a file finishes searching, overwrite the previous JSON file.
                with open("master_data_ref.json", "w") as outfile:
                    json.dump(data_ref, outfile)
                print(str(i) + 'finish searching ' + 'doi.txt')
        except IOError:
            pass
        except ValueError:
            pass


if __name__ == '__main__':
    main()
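search_doi above requests http://api.crossref.org/works/<doi> and decodes the JSON body. A small sketch of the same lookup with basic error handling; the DOI below is only a placeholder, and the "message" key reflects the Crossref REST response envelope:

import json
import urllib.request
from urllib.error import URLError


def fetch_crossref_record(doi):
    """Return the Crossref JSON record for a DOI, or None when the lookup fails."""
    url = "http://api.crossref.org/works/" + doi
    try:
        with urllib.request.urlopen(url) as resp:
            return json.loads(resp.read().decode('utf-8'))
    except (URLError, ValueError):
        return None


record = fetch_crossref_record("10.1000/xyz123")  # placeholder DOI
if record is not None:
    print(record["message"].get("title"))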
bsd-2-clause
bbuckingham/katello
scripts/reindent.py
69
10490
#! /usr/bin/env python # Released to the public domain, by Tim Peters, 03 October 2000. """reindent [-d][-r][-v] [ path ... ] -d (--dryrun) Dry run. Analyze, but don't make any changes to, files. -r (--recurse) Recurse. Search for all .py files in subdirectories too. -n (--nobackup) No backup. Does not make a ".bak" file before reindenting. -v (--verbose) Verbose. Print informative msgs; else no output. -h (--help) Help. Print this usage information and exit. Change Python (.py) files to use 4-space indents and no hard tab characters. Also trim excess spaces and tabs from ends of lines, and remove empty lines at the end of files. Also ensure the last line ends with a newline. If no paths are given on the command line, reindent operates as a filter, reading a single source file from standard input and writing the transformed source to standard output. In this case, the -d, -r and -v flags are ignored. You can pass one or more file and/or directory paths. When a directory path, all .py files within the directory will be examined, and, if the -r option is given, likewise recursively for subdirectories. If output is not to standard output, reindent overwrites files in place, renaming the originals with a .bak extension. If it finds nothing to change, the file is left alone. If reindent does change a file, the changed file is a fixed-point for future runs (i.e., running reindent on the resulting .py file won't change it again). The hard part of reindenting is figuring out what to do with comment lines. So long as the input files get a clean bill of health from tabnanny.py, reindent should do a good job. The backup file is a copy of the one that is being reindented. The ".bak" file is generated with shutil.copy(), but some corner cases regarding user/group and permissions could leave the backup file more readable that you'd prefer. You can always use the --nobackup option to prevent this. """ __version__ = "1" import tokenize import os, shutil import sys verbose = 0 recurse = 0 dryrun = 0 makebackup = True def usage(msg=None): if msg is not None: print >> sys.stderr, msg print >> sys.stderr, __doc__ def errprint(*args): sep = "" for arg in args: sys.stderr.write(sep + str(arg)) sep = " " sys.stderr.write("\n") def main(): import getopt global verbose, recurse, dryrun, makebackup try: opts, args = getopt.getopt(sys.argv[1:], "drnvh", ["dryrun", "recurse", "nobackup", "verbose", "help"]) except getopt.error, msg: usage(msg) return for o, a in opts: if o in ('-d', '--dryrun'): dryrun += 1 elif o in ('-r', '--recurse'): recurse += 1 elif o in ('-n', '--nobackup'): makebackup = False elif o in ('-v', '--verbose'): verbose += 1 elif o in ('-h', '--help'): usage() return if not args: r = Reindenter(sys.stdin) r.run() r.write(sys.stdout) return for arg in args: check(arg) def check(file): if os.path.isdir(file) and not os.path.islink(file): if verbose: print "listing directory", file names = os.listdir(file) for name in names: fullname = os.path.join(file, name) if ((recurse and os.path.isdir(fullname) and not os.path.islink(fullname) and not os.path.split(fullname)[1].startswith(".")) or name.lower().endswith(".py")): check(fullname) return if verbose: print "checking", file, "...", try: f = open(file) except IOError, msg: errprint("%s: I/O Error: %s" % (file, str(msg))) return r = Reindenter(f) f.close() if r.run(): if verbose: print "changed." if dryrun: print "But this is a dry run, so leaving it alone." 
if not dryrun: bak = file + ".bak" if makebackup: shutil.copyfile(file, bak) if verbose: print "backed up", file, "to", bak f = open(file, "w") r.write(f) f.close() if verbose: print "wrote new", file return True else: if verbose: print "unchanged." return False def _rstrip(line, JUNK='\n \t'): """Return line stripped of trailing spaces, tabs, newlines. Note that line.rstrip() instead also strips sundry control characters, but at least one known Emacs user expects to keep junk like that, not mentioning Barry by name or anything <wink>. """ i = len(line) while i > 0 and line[i-1] in JUNK: i -= 1 return line[:i] class Reindenter: def __init__(self, f): self.find_stmt = 1 # next token begins a fresh stmt? self.level = 0 # current indent level # Raw file lines. self.raw = f.readlines() # File lines, rstripped & tab-expanded. Dummy at start is so # that we can use tokenize's 1-based line numbering easily. # Note that a line is all-blank iff it's "\n". self.lines = [_rstrip(line).expandtabs() + "\n" for line in self.raw] self.lines.insert(0, None) self.index = 1 # index into self.lines of next line # List of (lineno, indentlevel) pairs, one for each stmt and # comment line. indentlevel is -1 for comment lines, as a # signal that tokenize doesn't know what to do about them; # indeed, they're our headache! self.stats = [] def run(self): tokenize.tokenize(self.getline, self.tokeneater) # Remove trailing empty lines. lines = self.lines while lines and lines[-1] == "\n": lines.pop() # Sentinel. stats = self.stats stats.append((len(lines), 0)) # Map count of leading spaces to # we want. have2want = {} # Program after transformation. after = self.after = [] # Copy over initial empty lines -- there's nothing to do until # we see a line with *something* on it. i = stats[0][0] after.extend(lines[1:i]) for i in range(len(stats)-1): thisstmt, thislevel = stats[i] nextstmt = stats[i+1][0] have = getlspace(lines[thisstmt]) want = thislevel * 4 if want < 0: # A comment line. if have: # An indented comment line. If we saw the same # indentation before, reuse what it most recently # mapped to. want = have2want.get(have, -1) if want < 0: # Then it probably belongs to the next real stmt. for j in xrange(i+1, len(stats)-1): jline, jlevel = stats[j] if jlevel >= 0: if have == getlspace(lines[jline]): want = jlevel * 4 break if want < 0: # Maybe it's a hanging # comment like this one, # in which case we should shift it like its base # line got shifted. for j in xrange(i-1, -1, -1): jline, jlevel = stats[j] if jlevel >= 0: want = have + getlspace(after[jline-1]) - \ getlspace(lines[jline]) break if want < 0: # Still no luck -- leave it alone. want = have else: want = 0 assert want >= 0 have2want[have] = want diff = want - have if diff == 0 or have == 0: after.extend(lines[thisstmt:nextstmt]) else: for line in lines[thisstmt:nextstmt]: if diff > 0: if line == "\n": after.append(line) else: after.append(" " * diff + line) else: remove = min(getlspace(line), -diff) after.append(line[remove:]) return self.raw != self.after def write(self, f): f.writelines(self.after) # Line-getter for tokenize. def getline(self): if self.index >= len(self.lines): line = "" else: line = self.lines[self.index] self.index += 1 return line # Line-eater for tokenize. 
def tokeneater(self, type, token, (sline, scol), end, line, INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT, NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT, NL=tokenize.NL): if type == NEWLINE: # A program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? self.find_stmt = 1 elif type == INDENT: self.find_stmt = 1 self.level += 1 elif type == DEDENT: self.find_stmt = 1 self.level -= 1 elif type == COMMENT: if self.find_stmt: self.stats.append((sline, -1)) # but we're still looking for a new stmt, so leave # find_stmt alone elif type == NL: pass elif self.find_stmt: # This is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER. self.find_stmt = 0 if line: # not endmarker self.stats.append((sline, self.level)) # Count number of leading blanks. def getlspace(line): i, n = 0, len(line) while i < n and line[i] == " ": i += 1 return i if __name__ == '__main__': main()
gpl-2.0
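The Reindenter class in the record above can also be driven in memory rather than through the command line. A minimal sketch follows (Python 2, matching the script itself; the module name reindent is an assumption about how the file is saved, not part of the original):

from StringIO import StringIO     # Python 2 StringIO, since the script targets Python 2
from reindent import Reindenter   # assumed module name for the script above

src = "def f():\n\tif True:\n\t\treturn 1\n"   # tab-indented source
r = Reindenter(StringIO(src))
if r.run():                        # True when the indentation was changed
    out = StringIO()
    r.write(out)
    print(out.getvalue())          # 4-space indents, trailing whitespace stripped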
nicholasserra/sentry
src/sentry/services/smtp.py
36
3120
""" sentry.services.smtp ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function import asyncore import email import logging from smtpd import SMTPServer, SMTPChannel from email_reply_parser import EmailReplyParser from sentry.services.base import Service from sentry.tasks.email import process_inbound_email from sentry.utils.email import email_to_group_id logger = logging.getLogger(__name__) # HACK(mattrobenolt): literally no idea what I'm doing. Mostly made this up. # SMTPChannel doesn't support EHLO response, but nginx requires an EHLO. # EHLO is available in python 3, so this is backported somewhat def smtp_EHLO(self, arg): if not arg: self.push('501 Syntax: EHLO hostname') return if self._SMTPChannel__greeting: self.push('503 Duplicate HELO/EHLO') else: self._SMTPChannel__greeting = arg self.push('250 %s' % self._SMTPChannel__fqdn) SMTPChannel.smtp_EHLO = smtp_EHLO STATUS = { 200: '200 Ok', 550: '550 Not found', 552: '552 Message too long', } class SentrySMTPServer(Service, SMTPServer): name = 'smtp' max_message_length = 20000 # This might be too conservative def __init__(self, host=None, port=None, debug=False, workers=None): from django.conf import settings self.host = host or getattr(settings, 'SENTRY_SMTP_HOST', '0.0.0.0') self.port = port or getattr(settings, 'SENTRY_SMTP_PORT', 1025) def process_message(self, peer, mailfrom, rcpttos, raw_message): logger.info('Incoming message received from %s', mailfrom) if not len(rcpttos): logger.info('Incoming email had no recipients. Ignoring.') return STATUS[550] if len(raw_message) > self.max_message_length: logger.info('Inbound email message was too long: %d', len(raw_message)) return STATUS[552] try: group_id = email_to_group_id(rcpttos[0]) except Exception: logger.info('%r is not a valid email address', rcpttos) return STATUS[550] message = email.message_from_string(raw_message) payload = None if message.is_multipart(): for msg in message.walk(): if msg.get_content_type() == 'text/plain': payload = msg.get_payload() break if payload is None: # No text/plain part, bailing return STATUS[200] else: payload = message.get_payload() payload = EmailReplyParser.parse_reply(payload).strip() if not payload: # If there's no body, we don't need to go any further return STATUS[200] process_inbound_email.delay(mailfrom, group_id, payload) return STATUS[200] def run(self): SMTPServer.__init__(self, (self.host, self.port), None) try: asyncore.loop() except KeyboardInterrupt: pass
bsd-3-clause
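A hedged sketch of running the SMTP service above as a standalone process; it assumes Django settings are already configured for Sentry and that a worker is available to consume the process_inbound_email task:

from sentry.services.smtp import SentrySMTPServer

server = SentrySMTPServer(host='127.0.0.1', port=1025)
server.run()   # binds the socket, then blocks in asyncore.loop() until KeyboardInterrupt

Note that the module-level monkey-patch attaches the EHLO verb to smtpd.SMTPChannel as soon as sentry.services.smtp is imported, so clients and proxies (such as nginx) that open with EHLO are answered before any message is processed.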
seem-sky/kbengine
kbe/res/scripts/common/Lib/lib2to3/pgen2/tokenize.py
80
19321
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. # All rights reserved. """Tokenization help for Python programs. generate_tokens(readline) is a generator that breaks a stream of text into Python tokens. It accepts a readline-like method which is called repeatedly to get the next line of input (or "" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators Older entry points tokenize_loop(readline, tokeneater) tokenize(readline, tokeneater=printtoken) are the same, except instead of generating tokens, tokeneater is a callback function to which the 5 fields described above are passed as 5 arguments, each time a new token is found.""" __author__ = 'Ka-Ping Yee <[email protected]>' __credits__ = \ 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro' import string, re from codecs import BOM_UTF8, lookup from lib2to3.pgen2.token import * from . import token __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize", "generate_tokens", "untokenize"] del token try: bytes except NameError: # Support bytes type in Python <= 2.5, so 2to3 turns itself into # valid Python 3 code. bytes = str def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'[a-zA-Z_]\w*' Binnumber = r'0[bB][01]*' Hexnumber = r'0[xX][\da-fA-F]*[lL]?' Octnumber = r'0[oO]?[0-7]*[lL]?' Decnumber = r'[1-9]\d*[lL]?' Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?\d+' Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) Expfloat = r'\d+' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""') # Single-line ' or " string. String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", r"//=?", r"->", r"[+\-*/%&@|^=<>]=?", r"~") Bracket = '[][(){}]' Special = group(r'\r?\n', r'[:;.,`@]') Funny = group(Operator, Bracket, Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. 
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) tokenprog, pseudoprog, single3prog, double3prog = list(map( re.compile, (Token, PseudoToken, Single3, Double3))) endprogs = {"'": re.compile(Single), '"': re.compile(Double), "'''": single3prog, '"""': double3prog, "r'''": single3prog, 'r"""': double3prog, "u'''": single3prog, 'u"""': double3prog, "b'''": single3prog, 'b"""': double3prog, "ur'''": single3prog, 'ur"""': double3prog, "br'''": single3prog, 'br"""': double3prog, "R'''": single3prog, 'R"""': double3prog, "U'''": single3prog, 'U"""': double3prog, "B'''": single3prog, 'B"""': double3prog, "uR'''": single3prog, 'uR"""': double3prog, "Ur'''": single3prog, 'Ur"""': double3prog, "UR'''": single3prog, 'UR"""': double3prog, "bR'''": single3prog, 'bR"""': double3prog, "Br'''": single3prog, 'Br"""': double3prog, "BR'''": single3prog, 'BR"""': double3prog, 'r': None, 'R': None, 'u': None, 'U': None, 'b': None, 'B': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "u'''", 'u"""', "U'''", 'U"""', "b'''", 'b"""', "B'''", 'B"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""',): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "u'", 'u"', "U'", 'U"', "b'", 'b"', "B'", 'B"', "ur'", 'ur"', "Ur'", 'Ur"', "uR'", 'uR"', "UR'", 'UR"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"', ): single_quoted[t] = t tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing (srow, scol) = xxx_todo_changeme (erow, ecol) = xxx_todo_changeme1 print("%d,%d-%d,%d:\t%s\t%s" % \ (srow, scol, erow, ecol, tok_name[type], repr(token))) def tokenize(readline, tokeneater=printtoken): """ The tokenize() function accepts two parameters: one representing the input stream, and one providing an output mechanism for tokenize(). The first parameter, readline, must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. The second parameter, tokeneater, must also be a callable object. It is called once for each token, with five arguments, corresponding to the tuples generated by generate_tokens(). 
""" try: tokenize_loop(readline, tokeneater) except StopTokenizing: pass # backwards compatible interface def tokenize_loop(readline, tokeneater): for token_info in generate_tokens(readline): tokeneater(*token_info) class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 def add_whitespace(self, start): row, col = start assert row <= self.prev_row col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): for t in iterable: if len(t) == 2: self.compat(t, iterable) break tok_type, token, start, end, line = t self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): startline = False indents = [] toks_append = self.tokens.append toknum, tokval = token if toknum in (NAME, NUMBER): tokval += ' ' if toknum in (NEWLINE, NL): startline = True for tok in iterable: toknum, tokval = tok[:2] if toknum in (NAME, NUMBER): tokval += ' ' if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII) blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. 
""" bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return bytes() def find_cookie(line): try: line_string = line.decode('ascii') except UnicodeDecodeError: return None match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter raise SyntaxError("unknown encoding: " + encoding) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter raise SyntaxError('encoding problem: utf-8') encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] if not blank_re.match(first): return default, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def untokenize(iterable): """Transform tokens back into Python source code. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited intput: # Output text will tokenize the back to the input t1 = [tok[:2] for tok in generate_tokens(f.readline)] newcode = untokenize(t1) readline = iter(newcode.splitlines(1)).next t2 = [tok[:2] for tokin generate_tokens(readline)] assert t1 == t2 """ ut = Untokenizer() return ut.untokenize(iterable) def generate_tokens(readline): """ The generate_tokens() generator requires one argument, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. 
""" lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except StopIteration: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield (COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' 
and token != '.'): # ordinary number yield (NUMBER, token, spos, epos, line) elif initial in '\r\n': newline = NEWLINE if parenlev > 0: newline = NL yield (newline, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield (COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield (STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield (STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield (NAME, token, spos, epos, line) elif initial == '\\': # continued stmt # This yield is new; needed for better idempotency: yield (NL, token, spos, (lnum, pos), line) continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels yield (DEDENT, '', (lnum, 0), (lnum, 0), '') yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') if __name__ == '__main__': # testing import sys if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline) else: tokenize(sys.stdin.readline)
lgpl-3.0
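A small usage sketch of the generate_tokens() generator documented above, shown against the standard-library lib2to3 copy of the same module (the bundled copy in the record exposes the same interface):

from io import StringIO
from lib2to3.pgen2 import tokenize
from lib2to3.pgen2.token import tok_name

source = "x = 1\nif x:\n    y = x + 2\n"
for typ, tok, start, end, line in tokenize.generate_tokens(StringIO(source).readline):
    print(tok_name[typ], repr(tok), start, end)   # 5-tuples: type, string, start, end, line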
luoyetx/mxnet
example/ssd/data/demo/download_demo_images.py
24
1663
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os wd = os.path.dirname(os.path.realpath(__file__)) def download(url, target): os.system("wget {} -O {}".format(url, target)) if __name__ == "__main__": base_url = "https://cloud.githubusercontent.com/assets/3307514/" demo_list = {"20012566/cbb53c76-a27d-11e6-9aaa-91939c9a1cd5.jpg":"000001.jpg", "20012564/cbb43894-a27d-11e6-9619-ba792b66c4ae.jpg": "000002.jpg", "20012565/cbb53942-a27d-11e6-996c-125bb060a81d.jpg": "000004.jpg", "20012562/cbb4136e-a27d-11e6-884c-ed83c165b422.jpg": "000010.jpg", "20012567/cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg": "dog.jpg", "20012563/cbb41382-a27d-11e6-92a9-18dab4fd1ad3.jpg": "person.jpg", "20012568/cbc2d6f6-a27d-11e6-94c3-d35a9cb47609.jpg": "street.jpg"} for k, v in demo_list.items(): download(base_url + k, os.path.join(wd, v))
apache-2.0
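A hedged sketch of calling the helper above directly for a single image; the module name download_demo_images is an assumption, and the helper shells out to wget exactly as the script does:

import os
from download_demo_images import download, wd   # assumed import name for the script above

url = ("https://cloud.githubusercontent.com/assets/3307514/"
       "20012567/cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg")
download(url, os.path.join(wd, "dog.jpg"))       # fetches the 'dog.jpg' demo image via wget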
CARocha/addac_fadcanic
encuesta/migrations/0005_auto__add_cultivosanuales__add_productoanimal__add_productoprocesado__.py
2
26202
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CultivosAnuales' db.create_table(u'encuesta_cultivosanuales', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)), ('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'encuesta', ['CultivosAnuales']) # Adding model 'ProductoAnimal' db.create_table(u'encuesta_productoanimal', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)), ('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'encuesta', ['ProductoAnimal']) # Adding model 'ProductoProcesado' db.create_table(u'encuesta_productoprocesado', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)), ('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'encuesta', ['ProductoProcesado']) # Deleting field 'SeguridadCAnuales.unidad_medida' db.delete_column(u'encuesta_seguridadcanuales', 'unidad_medida') # Renaming column for 'SeguridadCAnuales.cultivos' to match new field type. db.rename_column(u'encuesta_seguridadcanuales', 'cultivos', 'cultivos_id') # Changing field 'SeguridadCAnuales.cultivos' db.alter_column(u'encuesta_seguridadcanuales', 'cultivos_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.CultivosAnuales'])) # Adding index on 'SeguridadCAnuales', fields ['cultivos'] db.create_index(u'encuesta_seguridadcanuales', ['cultivos_id']) # Deleting field 'SeguridadPProcesados.unidad_medida' db.delete_column(u'encuesta_seguridadpprocesados', 'unidad_medida') # Renaming column for 'SeguridadPProcesados.producto' to match new field type. db.rename_column(u'encuesta_seguridadpprocesados', 'producto', 'producto_id') # Changing field 'SeguridadPProcesados.producto' db.alter_column(u'encuesta_seguridadpprocesados', 'producto_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.ProductoProcesado'])) # Adding index on 'SeguridadPProcesados', fields ['producto'] db.create_index(u'encuesta_seguridadpprocesados', ['producto_id']) # Deleting field 'SeguridadPAnimal.unidad_medida' db.delete_column(u'encuesta_seguridadpanimal', 'unidad_medida') # Renaming column for 'SeguridadPAnimal.producto' to match new field type. 
db.rename_column(u'encuesta_seguridadpanimal', 'producto', 'producto_id') # Changing field 'SeguridadPAnimal.producto' db.alter_column(u'encuesta_seguridadpanimal', 'producto_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.ProductoAnimal'])) # Adding index on 'SeguridadPAnimal', fields ['producto'] db.create_index(u'encuesta_seguridadpanimal', ['producto_id']) def backwards(self, orm): # Removing index on 'SeguridadPAnimal', fields ['producto'] db.delete_index(u'encuesta_seguridadpanimal', ['producto_id']) # Removing index on 'SeguridadPProcesados', fields ['producto'] db.delete_index(u'encuesta_seguridadpprocesados', ['producto_id']) # Removing index on 'SeguridadCAnuales', fields ['cultivos'] db.delete_index(u'encuesta_seguridadcanuales', ['cultivos_id']) # Deleting model 'CultivosAnuales' db.delete_table(u'encuesta_cultivosanuales') # Deleting model 'ProductoAnimal' db.delete_table(u'encuesta_productoanimal') # Deleting model 'ProductoProcesado' db.delete_table(u'encuesta_productoprocesado') # Adding field 'SeguridadCAnuales.unidad_medida' db.add_column(u'encuesta_seguridadcanuales', 'unidad_medida', self.gf('django.db.models.fields.IntegerField')(default=1), keep_default=False) # Renaming column for 'SeguridadCAnuales.cultivos' to match new field type. db.rename_column(u'encuesta_seguridadcanuales', 'cultivos_id', 'cultivos') # Changing field 'SeguridadCAnuales.cultivos' db.alter_column(u'encuesta_seguridadcanuales', 'cultivos', self.gf('django.db.models.fields.IntegerField')()) # Adding field 'SeguridadPProcesados.unidad_medida' db.add_column(u'encuesta_seguridadpprocesados', 'unidad_medida', self.gf('django.db.models.fields.IntegerField')(default=1), keep_default=False) # Renaming column for 'SeguridadPProcesados.producto' to match new field type. db.rename_column(u'encuesta_seguridadpprocesados', 'producto_id', 'producto') # Changing field 'SeguridadPProcesados.producto' db.alter_column(u'encuesta_seguridadpprocesados', 'producto', self.gf('django.db.models.fields.IntegerField')()) # Adding field 'SeguridadPAnimal.unidad_medida' db.add_column(u'encuesta_seguridadpanimal', 'unidad_medida', self.gf('django.db.models.fields.IntegerField')(default=1), keep_default=False) # Renaming column for 'SeguridadPAnimal.producto' to match new field type. 
db.rename_column(u'encuesta_seguridadpanimal', 'producto_id', 'producto') # Changing field 'SeguridadPAnimal.producto' db.alter_column(u'encuesta_seguridadpanimal', 'producto', self.gf('django.db.models.fields.IntegerField')()) models = { u'encuesta.credito': { 'Meta': {'object_name': 'Credito'}, 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.OrganizacionesDanCredito']"}), 'personas': ('django.db.models.fields.IntegerField', [], {}), 'uso': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['encuesta.UsoCredito']", 'symmetrical': 'False'}) }, u'encuesta.cultivosanuales': { 'Meta': {'object_name': 'CultivosAnuales'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'encuesta.cultivossaf': { 'Meta': {'object_name': 'CultivosSaf'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'encuesta.educacion': { 'Meta': {'ordering': "(u'sexo_edad',)", 'object_name': 'Educacion'}, 'circ_estudio_adulto': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), 'estudiando': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nosabe_leer': ('django.db.models.fields.IntegerField', [], {}), 'num_persona': ('django.db.models.fields.IntegerField', [], {}), 'pri_completa': ('django.db.models.fields.IntegerField', [], {}), 'pri_incompleta': ('django.db.models.fields.IntegerField', [], {}), 'secu_completa': ('django.db.models.fields.IntegerField', [], {}), 'secu_incompleta': ('django.db.models.fields.IntegerField', [], {}), 'sexo_edad': ('django.db.models.fields.IntegerField', [], {}), 'uni_o_tecnico': ('django.db.models.fields.IntegerField', [], {}) }, u'encuesta.encuesta': { 'Meta': {'object_name': 'Encuesta'}, 'ano': ('django.db.models.fields.IntegerField', [], {}), 'fecha': ('django.db.models.fields.DateField', [], {}), 'fecha2': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'oficina': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'personas': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"}) }, u'encuesta.finca': { 'Meta': {'ordering': "(u'finca',)", 'object_name': 'Finca'}, 'animal_aves': ('django.db.models.fields.IntegerField', [], {}), 'animal_bovino': ('django.db.models.fields.IntegerField', [], {}), 'animal_caprino': ('django.db.models.fields.IntegerField', [], {}), 'animal_equino': ('django.db.models.fields.IntegerField', [], {}), 'animal_porcino': ('django.db.models.fields.IntegerField', [], {}), 'area_casa': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}), 'area_finca': 
('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}), 'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}), 'coordenadas_gps': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}), 'coordenadas_lg': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), 'finca': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}), 'fuente_agua': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'legalidad': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}), 'microcuenca': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Microcuenca']"}), 'municipio': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'municipio'", 'to': u"orm['lugar.Municipio']"}), 'nombre_productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'productores'", 'to': u"orm['encuesta.Productores']"}), 'propietario': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'tipo_casa': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}) }, u'encuesta.fotos': { 'Meta': {'object_name': 'Fotos'}, 'adjunto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'encuesta.ingresoservicionegocio': { 'Meta': {'object_name': 'IngresoServicioNegocio'}, 'cantidad': ('django.db.models.fields.IntegerField', [], {}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ingresos': ('django.db.models.fields.FloatField', [], {}), 'maneja': ('django.db.models.fields.IntegerField', [], {}), 'plan_negocio': ('django.db.models.fields.IntegerField', [], {}), 'precio': ('django.db.models.fields.FloatField', [], {}), 'servicios': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ServiciosActividades']"}) }, u'encuesta.innovacion': { 'Meta': {'object_name': 'Innovacion'}, 'aplica': ('django.db.models.fields.IntegerField', [], {}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.TipoInnovacion']"}) }, u'encuesta.organizacionesdancredito': { 'Meta': {'object_name': 'OrganizacionesDanCredito'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'encuesta.productoanimal': { 'Meta': {'object_name': 'ProductoAnimal'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'encuesta.productoprocesado': { 'Meta': 
{'object_name': 'ProductoProcesado'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'encuesta.productores': { 'Meta': {'object_name': 'Productores'}, 'cedula_productor': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}), 'celular': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'contador': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'sexo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, u'encuesta.recolector': { 'Meta': {'unique_together': "((u'nombre',),)", 'object_name': 'Recolector'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'encuesta.seguridadalimentaria': { 'Meta': {'object_name': 'SeguridadAlimentaria'}, 'alimentos': ('django.db.models.fields.IntegerField', [], {}), 'comprar': ('django.db.models.fields.BooleanField', [], {}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nivel_consumo_suficiente': ('django.db.models.fields.IntegerField', [], {}), 'porcentaje_compran': ('django.db.models.fields.IntegerField', [], {}), 'porcentaje_nivel': ('django.db.models.fields.IntegerField', [], {}) }, u'encuesta.seguridadcanuales': { 'Meta': {'object_name': 'SeguridadCAnuales'}, 'area_produccion': ('django.db.models.fields.FloatField', [], {}), 'auto_consumo': ('django.db.models.fields.FloatField', [], {}), 'cultivos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.CultivosAnuales']"}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'perdidas': ('django.db.models.fields.FloatField', [], {}), 'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}), 'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}), 'produccion': ('django.db.models.fields.FloatField', [], {}), 'venta_no': ('django.db.models.fields.FloatField', [], {}), 'venta_organizada': ('django.db.models.fields.FloatField', [], {}) }, u'encuesta.seguridadpanimal': { 'Meta': {'object_name': 'SeguridadPAnimal'}, 'auto_consumo': ('django.db.models.fields.FloatField', [], {}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maneja': ('django.db.models.fields.IntegerField', [], {}), 'perdidas': ('django.db.models.fields.FloatField', [], {}), 'plan_negocio': ('django.db.models.fields.IntegerField', [], {}), 'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}), 'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}), 'produccion': ('django.db.models.fields.FloatField', [], {}), 'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ProductoAnimal']"}), 'venta_no': ('django.db.models.fields.FloatField', [], {}), 'venta_organizada': 
('django.db.models.fields.FloatField', [], {}) }, u'encuesta.seguridadpprocesados': { 'Meta': {'object_name': 'SeguridadPProcesados'}, 'auto_consumo': ('django.db.models.fields.FloatField', [], {}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maneja': ('django.db.models.fields.IntegerField', [], {}), 'perdidas': ('django.db.models.fields.FloatField', [], {}), 'plan_negocio': ('django.db.models.fields.IntegerField', [], {}), 'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}), 'produccion': ('django.db.models.fields.FloatField', [], {}), 'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ProductoProcesado']"}), 'venta_no': ('django.db.models.fields.FloatField', [], {}), 'venta_organizada': ('django.db.models.fields.FloatField', [], {}) }, u'encuesta.seguridadsaf': { 'Meta': {'object_name': 'SeguridadSaf'}, 'area_desarrollo': ('django.db.models.fields.FloatField', [], {}), 'area_produccion': ('django.db.models.fields.FloatField', [], {}), 'auto_consumo': ('django.db.models.fields.FloatField', [], {}), 'cultivos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.CultivosSaf']"}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'perdidas': ('django.db.models.fields.FloatField', [], {}), 'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}), 'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}), 'produccion_total': ('django.db.models.fields.FloatField', [], {}), 'rendimiento': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'venta_no': ('django.db.models.fields.FloatField', [], {}), 'venta_organizada': ('django.db.models.fields.FloatField', [], {}) }, u'encuesta.serviciosactividades': { 'Meta': {'object_name': 'ServiciosActividades'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'unidad': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'encuesta.tipoinnovacion': { 'Meta': {'object_name': 'TipoInnovacion'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'encuesta.usocredito': { 'Meta': {'object_name': 'UsoCredito'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'encuesta.usotierra': { 'Meta': {'object_name': 'UsoTierra'}, 'anuales_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'arboles_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'bosque_primario': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'bosque_secundario': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'cultivos_anuales': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 
'cultivos_perennes': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'cultivos_semiperennes': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}), 'forestales_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'perennes_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'plantaciones_forestales': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'potrero_arboles': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'potrero_sin_arboles': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'primario_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'secundario_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'semiperennes_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'sin_arboles_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'tacotal': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'tacotal_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'total_uso': ('django.db.models.fields.FloatField', [], {}) }, u'lugar.comunidad': { 'Meta': {'object_name': 'Comunidad'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'}) }, u'lugar.departamento': { 'Meta': {'object_name': 'Departamento'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'}) }, u'lugar.microcuenca': { 'Meta': {'object_name': 'Microcuenca'}, 'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'lugar.municipio': { 'Meta': {'object_name': 'Municipio'}, 'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'}) } } complete_apps = ['encuesta']
gpl-3.0
bopo/django-userena
userena/utils.py
4
6483
from django.conf import settings from django.utils.encoding import smart_bytes from django.utils.functional import allow_lazy from django.utils.http import urlencode from django.utils.six import text_type from django.utils.text import Truncator from userena import settings as userena_settings from userena.compat import SiteProfileNotAvailable, get_model from hashlib import sha1, md5 import random, datetime import warnings def truncate_words(s, num, end_text='...'): truncate = end_text and ' %s' % end_text or '' return Truncator(s).words(num, truncate=truncate) truncate_words = allow_lazy(truncate_words, text_type) def get_gravatar(email, size=80, default='identicon'): """ Get's a Gravatar for a email address. :param size: The size in pixels of one side of the Gravatar's square image. Optional, if not supplied will default to ``80``. :param default: Defines what should be displayed if no image is found for this user. Optional argument which defaults to ``identicon``. The argument can be a URI to an image or one of the following options: ``404`` Do not load any image if none is associated with the email hash, instead return an HTTP 404 (File Not Found) response. ``mm`` Mystery-man, a simple, cartoon-style silhouetted outline of a person (does not vary by email hash). ``identicon`` A geometric pattern based on an email hash. ``monsterid`` A generated 'monster' with different colors, faces, etc. ``wavatar`` Generated faces with differing features and backgrounds :return: The URI pointing to the Gravatar. """ if userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE: base_url = 'https://secure.gravatar.com/avatar/' else: base_url = '//www.gravatar.com/avatar/' gravatar_url = '%(base_url)s%(gravatar_id)s?' % \ {'base_url': base_url, 'gravatar_id': md5(email.lower().encode('utf-8')).hexdigest()} gravatar_url += urlencode({ 's': str(size), 'd': default }) return gravatar_url def signin_redirect(redirect=None, user=None): """ Redirect user after successful sign in. First looks for a ``requested_redirect``. If not supplied will fall-back to the user specific account page. If all fails, will fall-back to the standard Django ``LOGIN_REDIRECT_URL`` setting. Returns a string defining the URI to go next. :param redirect: A value normally supplied by ``next`` form field. Gets preference before the default view which requires the user. :param user: A ``User`` object specifying the user who has just signed in. :return: String containing the URI to redirect to. """ if redirect: return redirect elif user is not None: return userena_settings.USERENA_SIGNIN_REDIRECT_URL % \ {'username': user.username} else: return settings.LOGIN_REDIRECT_URL def generate_sha1(string, salt=None): """ Generates a sha1 hash for supplied string. Doesn't need to be very secure because it's not used for password checking. We got Django for that. :param string: The string that needs to be encrypted. :param salt: Optionally define your own salt. If none is supplied, will use a random string of 5 characters. :return: Tuple containing the salt and hash. """ if not isinstance(string, (str, text_type)): string = str(string) if not salt: salt = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5] salted_bytes = (smart_bytes(salt) + smart_bytes(string)) hash_ = sha1(salted_bytes).hexdigest() return salt, hash_ def get_profile_model(): """ Return the model class for the currently-active user profile model, as defined by the ``AUTH_PROFILE_MODULE`` setting. :return: The model that is used as profile. 
""" if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \ (not settings.AUTH_PROFILE_MODULE): raise SiteProfileNotAvailable try: profile_mod = get_model(*settings.AUTH_PROFILE_MODULE.rsplit('.', 1)) except LookupError: profile_mod = None if profile_mod is None: raise SiteProfileNotAvailable return profile_mod def get_user_profile(user): profile_model = get_profile_model() try: profile = user.get_profile() except AttributeError: related_name = profile_model._meta.get_field('user')\ .related_query_name() profile = getattr(user, related_name, None) except profile_model.DoesNotExist: profile = None if profile: return profile return profile_model.objects.create(user=user) def get_protocol(): """ Returns a string with the current protocol. This can be either 'http' or 'https' depending on ``USERENA_USE_HTTPS`` setting. """ protocol = 'http' if getattr(settings, 'USERENA_USE_HTTPS', userena_settings.DEFAULT_USERENA_USE_HTTPS): protocol = 'https' return protocol def get_datetime_now(): """ Returns datetime object with current point in time. In Django 1.4+ it uses Django's django.utils.timezone.now() which returns an aware or naive datetime that represents the current point in time when ``USE_TZ`` in project's settings is True or False respectively. In older versions of Django it uses datetime.datetime.now(). """ try: from django.utils import timezone return timezone.now() # pragma: no cover except ImportError: # pragma: no cover return datetime.datetime.now() # Django 1.5 compatibility utilities, providing support for custom User models. # Since get_user_model() causes a circular import if called when app models are # being loaded, the user_model_label should be used when possible, with calls # to get_user_model deferred to execution time user_model_label = getattr(settings, 'AUTH_USER_MODEL', 'auth.User') def get_user_model(): warnings.warn("Use Django's django.contrib.auth.get_user_model directly. " "This function will be removed in future versions of " "django-userena.", DeprecationWarning) from django.contrib.auth import get_user_model return get_user_model()
bsd-3-clause
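A short usage sketch for the helpers above, inside an already configured Django project (all values are illustrative, not taken from the original):

from userena.utils import get_gravatar, generate_sha1, get_protocol

avatar_url = get_gravatar('[email protected]', size=128, default='mm')  # Gravatar URI
salt, digest = generate_sha1('activation-material')                    # returns (salt, sha1 hex)
activation_link = '%s://example.com/activate/%s/' % (get_protocol(), digest)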
mbayon/TFG-MachineLearning
vbig/lib/python2.7/site-packages/django/contrib/sessions/middleware.py
129
3423
import time from importlib import import_module from django.conf import settings from django.contrib.sessions.backends.base import UpdateError from django.core.exceptions import SuspiciousOperation from django.utils.cache import patch_vary_headers from django.utils.deprecation import MiddlewareMixin from django.utils.http import cookie_date class SessionMiddleware(MiddlewareMixin): def __init__(self, get_response=None): self.get_response = get_response engine = import_module(settings.SESSION_ENGINE) self.SessionStore = engine.SessionStore def process_request(self, request): session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME) request.session = self.SessionStore(session_key) def process_response(self, request, response): """ If request.session was modified, or if the configuration is to save the session every time, save the changes and set a session cookie or delete the session cookie if the session has been emptied. """ try: accessed = request.session.accessed modified = request.session.modified empty = request.session.is_empty() except AttributeError: pass else: # First check if we need to delete this cookie. # The session should be deleted only if the session is entirely empty if settings.SESSION_COOKIE_NAME in request.COOKIES and empty: response.delete_cookie( settings.SESSION_COOKIE_NAME, path=settings.SESSION_COOKIE_PATH, domain=settings.SESSION_COOKIE_DOMAIN, ) else: if accessed: patch_vary_headers(response, ('Cookie',)) if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty: if request.session.get_expire_at_browser_close(): max_age = None expires = None else: max_age = request.session.get_expiry_age() expires_time = time.time() + max_age expires = cookie_date(expires_time) # Save the session data and refresh the client cookie. # Skip session save for 500 responses, refs #3881. if response.status_code != 500: try: request.session.save() except UpdateError: raise SuspiciousOperation( "The request's session was deleted before the " "request completed. The user may have logged " "out in a concurrent request, for example." ) response.set_cookie( settings.SESSION_COOKIE_NAME, request.session.session_key, max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN, path=settings.SESSION_COOKIE_PATH, secure=settings.SESSION_COOKIE_SECURE or None, httponly=settings.SESSION_COOKIE_HTTPONLY or None, ) return response
mit
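A minimal settings sketch showing how the middleware above is wired into a project of this Django vintage (MiddlewareMixin-era Django, where the setting is MIDDLEWARE rather than MIDDLEWARE_CLASSES; values are illustrative defaults):

# settings.py fragment
INSTALLED_APPS = [
    'django.contrib.sessions',
    # ...
]
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    # ...
]
SESSION_ENGINE = 'django.contrib.sessions.backends.db'  # module handed to import_module()
SESSION_SAVE_EVERY_REQUEST = False                      # cookie refreshed only when the session is modified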
douglaskastle/mutagen
tests/test_trueaudio.py
1
1544
# -*- coding: utf-8 -*- import os import shutil from mutagen.trueaudio import TrueAudio, delete from mutagen.id3 import TIT1 from tests import TestCase, DATA_DIR from tempfile import mkstemp class TTrueAudio(TestCase): def setUp(self): self.audio = TrueAudio(os.path.join(DATA_DIR, "empty.tta")) def test_tags(self): self.failUnless(self.audio.tags is None) def test_length(self): self.failUnlessAlmostEqual(self.audio.info.length, 3.7, 1) def test_sample_rate(self): self.failUnlessEqual(44100, self.audio.info.sample_rate) def test_not_my_file(self): filename = os.path.join(DATA_DIR, "empty.ogg") self.failUnlessRaises(IOError, TrueAudio, filename) def test_module_delete(self): delete(os.path.join(DATA_DIR, "empty.tta")) def test_delete(self): self.audio.delete() self.failIf(self.audio.tags) def test_pprint(self): self.failUnless(self.audio.pprint()) def test_save_reload(self): try: fd, filename = mkstemp(suffix='.tta') os.close(fd) shutil.copy(self.audio.filename, filename) audio = TrueAudio(filename) audio.add_tags() audio.tags.add(TIT1(encoding=0, text="A Title")) audio.save() audio = TrueAudio(filename) self.failUnlessEqual(audio["TIT1"], "A Title") finally: os.unlink(filename) def test_mime(self): self.failUnless("audio/x-tta" in self.audio.mime)
gpl-2.0
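The test case above exercises the mutagen TrueAudio API; a hedged sketch of the same flow outside the test harness (the .tta path is hypothetical):

from mutagen.trueaudio import TrueAudio
from mutagen.id3 import TIT1

audio = TrueAudio("example.tta")                     # hypothetical file path
if audio.tags is None:
    audio.add_tags()                                 # attach an empty ID3 container
audio.tags.add(TIT1(encoding=3, text=["A Title"]))   # same frame the test writes
audio.save()
print(audio.pprint())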
astropy/astropy
astropy/units/misc.py
8
3393
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines miscellaneous units. They are also available in the `astropy.units` namespace. """ from . import si from astropy.constants import si as _si from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes, set_enabled_units) # To ensure si units of the constants can be interpreted. set_enabled_units([si]) import numpy as _numpy _ns = globals() ########################################################################### # AREAS def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True, doc="barn: unit of area used in HEP") ########################################################################### # ANGULAR MEASUREMENTS def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad, namespace=_ns, prefixes=False, doc="cycle: angular measurement, a full turn or rotation") def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr, namespace=_ns, prefixes=False, doc="spat: the solid angle of the sphere, 4pi sr") ########################################################################## # PRESSURE def_unit(['bar'], 1e5 * si.Pa, namespace=_ns, prefixes=[(['m'], ['milli'], 1.e-3)], doc="bar: pressure") # The torr is almost the same as mmHg but not quite. # See https://en.wikipedia.org/wiki/Torr # Define the unit here despite it not being an astrophysical unit. # It may be moved if more similar units are created later. def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns, prefixes=[(['m'], ['milli'], 1.e-3)], doc="Unit of pressure based on an absolute scale, now defined as " "exactly 1/760 of a standard atmosphere") ########################################################################### # MASS def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass", format={'latex': r'M_{p}', 'unicode': 'Mₚ'}) def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass", format={'latex': r'M_{e}', 'unicode': 'Mₑ'}) # Unified atomic mass unit def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns, prefixes=True, exclude_prefixes=['a', 'da'], doc="Unified atomic mass unit") ########################################################################### # COMPUTER def_unit((['bit', 'b'], ['bit']), namespace=_ns, prefixes=si_prefixes + binary_prefixes) def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns, format={'vounit': 'byte'}, prefixes=si_prefixes + binary_prefixes, exclude_prefixes=['d']) def_unit((['pix', 'pixel'], ['pixel']), format={'ogip': 'pixel', 'vounit': 'pixel'}, namespace=_ns, prefixes=True) def_unit((['vox', 'voxel'], ['voxel']), format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'}, namespace=_ns, prefixes=True) ########################################################################### # CLEANUP del UnitBase del def_unit del si ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
bsd-3-clause
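A brief usage sketch for a few of the units the module above defines, accessed through the public astropy.units namespace:

import astropy.units as u

pressure = (1.5 * u.bar).to(u.Torr)    # bar and Torr are both defined above
area = (3.0 * u.barn).to(u.cm ** 2)    # HEP barn converted to cm^2
size = (512 * u.Mibit).to(u.byte)      # binary prefixes are enabled for bit/byte
print(pressure, area, size)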
mdaniel/intellij-community
python/helpers/third_party/thriftpy/_shaded_ply/yacc.py
99
135805
# ----------------------------------------------------------------------------- # ply: yacc.py # # Copyright (C) 2001-2015, # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- # # This implements an LR parser that is constructed from grammar rules defined # as Python functions. The grammer is specified by supplying the BNF inside # Python documentation strings. The inspiration for this technique was borrowed # from John Aycock's Spark parsing system. PLY might be viewed as cross between # Spark and the GNU bison utility. # # The current implementation is only somewhat object-oriented. The # LR parser itself is defined in terms of an object (which allows multiple # parsers to co-exist). However, most of the variables used during table # construction are defined in terms of global variables. Users shouldn't # notice unless they are trying to define multiple parsers at the same # time using threads (in which case they should have their head examined). # # This implementation supports both SLR and LALR(1) parsing. LALR(1) # support was originally implemented by Elias Ioup ([email protected]), # using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, # Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced # by the more efficient DeRemer and Pennello algorithm. # # :::::::: WARNING ::::::: # # Construction of LR parsing tables is fairly complicated and expensive. # To make this module run fast, a *LOT* of work has been put into # optimization---often at the expensive of readability and what might # consider to be good Python "coding style." Modify the code at your # own risk! 
# ---------------------------------------------------------------------------- import re import types import sys import os.path import inspect import base64 import warnings __version__ = '3.8' __tabversion__ = '3.8' #----------------------------------------------------------------------------- # === User configurable parameters === # # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- yaccdebug = True # Debugging mode. If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode yaccdevel = False # Set to True if developing yacc. This turns off optimized # implementations of certain functions. resultlimit = 40 # Size limit of results when running in debug mode. pickle_protocol = 0 # Protocol to use when writing pickle files # String type-checking compatibility if sys.version_info[0] < 3: string_types = basestring else: string_types = str MAXINT = sys.maxsize # This object is a stand-in for a logging object created by the # logging module. PLY will use this by default to create things # such as the parser.out file. If a user wants more detailed # information, they can create their own logging object and pass # it into PLY. class PlyLogger(object): def __init__(self, f): self.f = f def debug(self, msg, *args, **kwargs): self.f.write((msg % args) + '\n') info = debug def warning(self, msg, *args, **kwargs): self.f.write('WARNING: ' + (msg % args) + '\n') def error(self, msg, *args, **kwargs): self.f.write('ERROR: ' + (msg % args) + '\n') critical = debug # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self, name): return self def __call__(self, *args, **kwargs): return self # Exception raised for yacc-related errors class YaccError(Exception): pass # Format the result message that the parser produces when running in debug mode. def format_result(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) > resultlimit: repr_str = repr_str[:resultlimit] + ' ...' result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) return result # Format stack entries when the parser is running in debug mode def format_stack_entry(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) < 16: return repr_str else: return '<%s @ 0x%x>' % (type(r).__name__, id(r)) # Panic mode error recovery support. This feature is being reworked--much of the # code here is to offer a deprecation/backwards compatible transition _errok = None _token = None _restart = None _warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). Instead, invoke the methods on the associated parser instance: def p_error(p): ... # Use parser.errok(), parser.token(), parser.restart() ... 
parser = yacc.yacc() ''' def errok(): warnings.warn(_warnmsg) return _errok() def restart(): warnings.warn(_warnmsg) return _restart() def token(): warnings.warn(_warnmsg) return _token() # Utility function to call the p_error() function with some deprecation hacks def call_errorfunc(errorfunc, token, parser): global _errok, _token, _restart _errok = parser.errok _token = parser.token _restart = parser.restart r = errorfunc(token) try: del _errok, _token, _restart except NameError: pass return r #----------------------------------------------------------------------------- # === LR Parsing Engine === # # The following classes are used for the LR parser itself. These are not # used during table construction and are independent of the actual LR # table generation algorithm #----------------------------------------------------------------------------- # This class is used to hold non-terminal grammar symbols during parsing. # It normally has the following attributes set: # .type = Grammar symbol type # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) # .lexpos = Starting lex position # .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: def __str__(self): return self.type def __repr__(self): return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the # .value attribute of the underlying YaccSymbol object. # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines # for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) # representing the range of positional information for a symbol. class YaccProduction: def __init__(self, s, stack=None): self.slice = s self.stack = stack self.lexer = None self.parser = None def __getitem__(self, n): if isinstance(n, slice): return [s.value for s in self.slice[n]] elif n >= 0: return self.slice[n].value else: return self.stack[n].value def __setitem__(self, n, v): self.slice[n].value = v def __getslice__(self, i, j): return [s.value for s in self.slice[i:j]] def __len__(self): return len(self.slice) def lineno(self, n): return getattr(self.slice[n], 'lineno', 0) def set_lineno(self, n, lineno): self.slice[n].lineno = lineno def linespan(self, n): startline = getattr(self.slice[n], 'lineno', 0) endline = getattr(self.slice[n], 'endlineno', startline) return startline, endline def lexpos(self, n): return getattr(self.slice[n], 'lexpos', 0) def lexspan(self, n): startpos = getattr(self.slice[n], 'lexpos', 0) endpos = getattr(self.slice[n], 'endlexpos', startpos) return startpos, endpos def error(self): raise SyntaxError # ----------------------------------------------------------------------------- # == LRParser == # # The LR Parsing engine. # ----------------------------------------------------------------------------- class LRParser: def __init__(self, lrtab, errorf): self.productions = lrtab.lr_productions self.action = lrtab.lr_action self.goto = lrtab.lr_goto self.errorfunc = errorf self.set_defaulted_states() self.errorok = True def errok(self): self.errorok = True def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) # Defaulted state support. # This method identifies parser states where there is only one possible reduction action. 
# For such states, the parser can make a choose to make a rule reduction without consuming # the next look-ahead token. This delayed invocation of the tokenizer can be useful in # certain kinds of advanced parsing situations where the lexer and parser interact with # each other or change states (i.e., manipulation of scope, lexer states, etc.). # # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions def set_defaulted_states(self): self.defaulted_states = {} for state, actions in self.action.items(): rules = list(actions.values()) if len(rules) == 1 and rules[0] < 0: self.defaulted_states[state] = rules[0] def disable_defaulted_states(self): self.defaulted_states = {} def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): if debug or yaccdevel: if isinstance(debug, int): debug = PlyLogger(sys.stderr) return self.parsedebug(input, lexer, debug, tracking, tokenfunc) elif tracking: return self.parseopt(input, lexer, debug, tracking, tokenfunc) else: return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parsedebug(). # # This is the debugging enabled version of parse(). All changes made to the # parsing engine should be made here. Optimized versions of this function # are automatically created by the ply/ygen.py script. This script cuts out # sections enclosed in markers such as this: # # #--! DEBUG # statements # #--! DEBUG # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parsedebug-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery #--! DEBUG debug.info('PLY: PARSE DEBUG START') #--! DEBUG # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer #--! DEBUG debug.debug('') debug.debug('State : %s', state) #--! 
DEBUG if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] #--! DEBUG debug.debug('Defaulted state %s: Reduce using %d', state, -t) #--! DEBUG #--! DEBUG debug.debug('Stack : %s', ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t #--! DEBUG debug.debug('Action : Shift and goto state %s', t) #--! DEBUG symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None #--! DEBUG if plen: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', goto[statestack[-1-plen]][pname]) else: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], goto[statestack[-1]][pname]) #--! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) #--! DEBUG debug.info('Done : Returning %s', format_result(result)) debug.info('PLY: PARSE DEBUG END') #--! DEBUG return result if t is None: #--! DEBUG debug.error('Error : %s', ('%s . 
%s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parsedebug-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt(). # # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! # This code is automatically generated by the ply/ygen.py script. Make # changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) 
goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. 
pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt_notrack(). # # Optimized version of parseopt() with line number tracking removed. 
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated # by the ply/ygen.py script. Make changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-notrack-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
# The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-notrack-end # ----------------------------------------------------------------------------- # === Grammar Representation === # # The following functions, classes, and variables are used to represent and # manipulate the rules that make up a grammar. 
# ----------------------------------------------------------------------------- # regex matching identifiers _is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') # ----------------------------------------------------------------------------- # class Production: # # This class stores the raw information about a single production or grammar rule. # A grammar rule refers to a specification such as this: # # expr : expr PLUS term # # Here are the basic attributes defined on all productions # # name - Name of the production. For example 'expr' # prod - A list of symbols on the right side ['expr','PLUS','term'] # prec - Production precedence level # number - Production number. # func - Function that executes on reduce # file - File where production function is defined # lineno - Line number where production function is defined # # The following attributes are defined or optional. # # len - Length of the production (number of symbols on right hand side) # usyms - Set of unique symbols found in the production # ----------------------------------------------------------------------------- class Production(object): reduced = 0 def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0): self.name = name self.prod = tuple(prod) self.number = number self.func = func self.callable = None self.file = file self.line = line self.prec = precedence # Internal settings used during table construction self.len = len(self.prod) # Length of the production # Create a list of unique production symbols used in the production self.usyms = [] for s in self.prod: if s not in self.usyms: self.usyms.append(s) # List of all LR items for the production self.lr_items = [] self.lr_next = None # Create a string representation if self.prod: self.str = '%s -> %s' % (self.name, ' '.join(self.prod)) else: self.str = '%s -> <empty>' % self.name def __str__(self): return self.str def __repr__(self): return 'Production(' + str(self) + ')' def __len__(self): return len(self.prod) def __nonzero__(self): return 1 def __getitem__(self, index): return self.prod[index] # Return the nth lr_item from the production (or None if at the end) def lr_item(self, n): if n > len(self.prod): return None p = LRItem(self, n) # Precompute the list of productions immediately following. try: p.lr_after = Prodnames[p.prod[n+1]] except (IndexError, KeyError): p.lr_after = [] try: p.lr_before = p.prod[n-1] except IndexError: p.lr_before = None return p # Bind the production function name to a callable def bind(self, pdict): if self.func: self.callable = pdict[self.func] # This class serves as a minimal standin for Production objects when # reading table data from files. It only contains information # actually used by the LR parsing engine, plus some additional # debugging information. class MiniProduction(object): def __init__(self, str, name, len, func, file, line): self.name = name self.len = len self.func = func self.callable = None self.file = file self.line = line self.str = str def __str__(self): return self.str def __repr__(self): return 'MiniProduction(%s)' % self.str # Bind the production function name to a callable def bind(self, pdict): if self.func: self.callable = pdict[self.func] # ----------------------------------------------------------------------------- # class LRItem # # This class represents a specific stage of parsing a production rule. For # example: # # expr : expr . PLUS term # # In the above, the "." represents the current location of the parse. 
Here # basic attributes: # # name - Name of the production. For example 'expr' # prod - A list of symbols on the right side ['expr','.', 'PLUS','term'] # number - Production number. # # lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term' # then lr_next refers to 'expr -> expr PLUS . term' # lr_index - LR item index (location of the ".") in the prod list. # lookaheads - LALR lookahead symbols for this item # len - Length of the production (number of symbols on right hand side) # lr_after - List of all productions that immediately follow # lr_before - Grammar symbol immediately before # ----------------------------------------------------------------------------- class LRItem(object): def __init__(self, p, n): self.name = p.name self.prod = list(p.prod) self.number = p.number self.lr_index = n self.lookaheads = {} self.prod.insert(n, '.') self.prod = tuple(self.prod) self.len = len(self.prod) self.usyms = p.usyms def __str__(self): if self.prod: s = '%s -> %s' % (self.name, ' '.join(self.prod)) else: s = '%s -> <empty>' % self.name return s def __repr__(self): return 'LRItem(' + str(self) + ')' # ----------------------------------------------------------------------------- # rightmost_terminal() # # Return the rightmost terminal from a list of symbols. Used in add_production() # ----------------------------------------------------------------------------- def rightmost_terminal(symbols, terminals): i = len(symbols) - 1 while i >= 0: if symbols[i] in terminals: return symbols[i] i -= 1 return None # ----------------------------------------------------------------------------- # === GRAMMAR CLASS === # # The following class represents the contents of the specified grammar along # with various computed properties such as first sets, follow sets, LR items, etc. # This data is used for critical parts of the table generation process later. # ----------------------------------------------------------------------------- class GrammarError(YaccError): pass class Grammar(object): def __init__(self, terminals): self.Productions = [None] # A list of all of the productions. The first # entry is always reserved for the purpose of # building an augmented grammar self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all # productions of that nonterminal. self.Prodmap = {} # A dictionary that is only used to detect duplicate # productions. self.Terminals = {} # A dictionary mapping the names of terminal symbols to a # list of the rules where they are used. for term in terminals: self.Terminals[term] = [] self.Terminals['error'] = [] self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list # of rule numbers where they are used. self.First = {} # A dictionary of precomputed FIRST(x) symbols self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the # form ('right',level) or ('nonassoc', level) or ('left',level) self.UsedPrecedence = set() # Precedence rules that were actually used by the grammer. # This is only used to provide error checking and to generate # a warning about unused precedence rules. self.Start = None # Starting symbol for the grammar def __len__(self): return len(self.Productions) def __getitem__(self, index): return self.Productions[index] # ----------------------------------------------------------------------------- # set_precedence() # # Sets the precedence for a given terminal. 
assoc is the associativity such as # 'left','right', or 'nonassoc'. level is a numeric level. # # ----------------------------------------------------------------------------- def set_precedence(self, term, assoc, level): assert self.Productions == [None], 'Must call set_precedence() before add_production()' if term in self.Precedence: raise GrammarError('Precedence already specified for terminal %r' % term) if assoc not in ['left', 'right', 'nonassoc']: raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") self.Precedence[term] = (assoc, level) # ----------------------------------------------------------------------------- # add_production() # # Given an action function, this function assembles a production rule and # computes its precedence level. # # The production rule is supplied as a list of symbols. For example, # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and # symbols ['expr','PLUS','term']. # # Precedence is determined by the precedence of the right-most non-terminal # or the precedence of a terminal specified by %prec. # # A variety of error checks are performed to make sure production symbols # are valid and that %prec is used correctly. # ----------------------------------------------------------------------------- def add_production(self, prodname, syms, func=None, file='', line=0): if prodname in self.Terminals: raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) if prodname == 'error': raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) if not _is_identifier.match(prodname): raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) # Look for literal tokens for n, s in enumerate(syms): if s[0] in "'\"": try: c = eval(s) if (len(c) > 1): raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % (file, line, s, prodname)) if c not in self.Terminals: self.Terminals[c] = [] syms[n] = c continue except SyntaxError: pass if not _is_identifier.match(s) and s != '%prec': raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) # Determine the precedence level if '%prec' in syms: if syms[-1] == '%prec': raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) if syms[-2] != '%prec': raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % (file, line)) precname = syms[-1] prodprec = self.Precedence.get(precname) if not prodprec: raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) else: self.UsedPrecedence.add(precname) del syms[-2:] # Drop %prec from the rule else: # If no %prec, precedence is determined by the rightmost terminal symbol precname = rightmost_terminal(syms, self.Terminals) prodprec = self.Precedence.get(precname, ('right', 0)) # See if the rule is already in the rulemap map = '%s -> %s' % (prodname, syms) if map in self.Prodmap: m = self.Prodmap[map] raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + 'Previous definition at %s:%d' % (m.file, m.line)) # From this point on, everything is valid. 
Create a new Production instance pnumber = len(self.Productions) if prodname not in self.Nonterminals: self.Nonterminals[prodname] = [] # Add the production number to Terminals and Nonterminals for t in syms: if t in self.Terminals: self.Terminals[t].append(pnumber) else: if t not in self.Nonterminals: self.Nonterminals[t] = [] self.Nonterminals[t].append(pnumber) # Create a production and add it to the list of productions p = Production(pnumber, prodname, syms, prodprec, func, file, line) self.Productions.append(p) self.Prodmap[map] = p # Add to the global productions list try: self.Prodnames[prodname].append(p) except KeyError: self.Prodnames[prodname] = [p] # ----------------------------------------------------------------------------- # set_start() # # Sets the starting symbol and creates the augmented grammar. Production # rule 0 is S' -> start where start is the start symbol. # ----------------------------------------------------------------------------- def set_start(self, start=None): if not start: start = self.Productions[1].name if start not in self.Nonterminals: raise GrammarError('start symbol %s undefined' % start) self.Productions[0] = Production(0, "S'", [start]) self.Nonterminals[start].append(0) self.Start = start # ----------------------------------------------------------------------------- # find_unreachable() # # Find all of the nonterminal symbols that can't be reached from the starting # symbol. Returns a list of nonterminals that can't be reached. # ----------------------------------------------------------------------------- def find_unreachable(self): # Mark all symbols that are reachable from a symbol s def mark_reachable_from(s): if s in reachable: return reachable.add(s) for p in self.Prodnames.get(s, []): for r in p.prod: mark_reachable_from(r) reachable = set() mark_reachable_from(self.Productions[0].prod[0]) return [s for s in self.Nonterminals if s not in reachable] # ----------------------------------------------------------------------------- # infinite_cycles() # # This function looks at the various parsing rules and tries to detect # infinite recursion cycles (grammar rules where there is no possible way # to derive a string of only terminals). # ----------------------------------------------------------------------------- def infinite_cycles(self): terminates = {} # Terminals: for t in self.Terminals: terminates[t] = True terminates['$end'] = True # Nonterminals: # Initialize to false: for n in self.Nonterminals: terminates[n] = False # Then propagate termination until no change: while True: some_change = False for (n, pl) in self.Prodnames.items(): # Nonterminal n terminates iff any of its productions terminates. for p in pl: # Production p terminates iff all of its rhs symbols terminate. for s in p.prod: if not terminates[s]: # The symbol s does not terminate, # so production p does not terminate. p_terminates = False break else: # didn't break from the loop, # so every symbol s terminates # so production p terminates. p_terminates = True if p_terminates: # symbol n terminates! if not terminates[n]: terminates[n] = True some_change = True # Don't need to consider any more productions for this n. break if not some_change: break infinite = [] for (s, term) in terminates.items(): if not term: if s not in self.Prodnames and s not in self.Terminals and s != 'error': # s is used-but-not-defined, and we've already warned of that, # so it would be overkill to say that it's also non-terminating. 
pass else: infinite.append(s) return infinite # ----------------------------------------------------------------------------- # undefined_symbols() # # Find all symbols that were used the grammar, but not defined as tokens or # grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol # and prod is the production where the symbol was used. # ----------------------------------------------------------------------------- def undefined_symbols(self): result = [] for p in self.Productions: if not p: continue for s in p.prod: if s not in self.Prodnames and s not in self.Terminals and s != 'error': result.append((s, p)) return result # ----------------------------------------------------------------------------- # unused_terminals() # # Find all terminals that were defined, but not used by the grammar. Returns # a list of all symbols. # ----------------------------------------------------------------------------- def unused_terminals(self): unused_tok = [] for s, v in self.Terminals.items(): if s != 'error' and not v: unused_tok.append(s) return unused_tok # ------------------------------------------------------------------------------ # unused_rules() # # Find all grammar rules that were defined, but not used (maybe not reachable) # Returns a list of productions. # ------------------------------------------------------------------------------ def unused_rules(self): unused_prod = [] for s, v in self.Nonterminals.items(): if not v: p = self.Prodnames[s][0] unused_prod.append(p) return unused_prod # ----------------------------------------------------------------------------- # unused_precedence() # # Returns a list of tuples (term,precedence) corresponding to precedence # rules that were never used by the grammar. term is the name of the terminal # on which precedence was applied and precedence is a string such as 'left' or # 'right' corresponding to the type of precedence. # ----------------------------------------------------------------------------- def unused_precedence(self): unused = [] for termname in self.Precedence: if not (termname in self.Terminals or termname in self.UsedPrecedence): unused.append((termname, self.Precedence[termname][0])) return unused # ------------------------------------------------------------------------- # _first() # # Compute the value of FIRST1(beta) where beta is a tuple of symbols. # # During execution of compute_first1, the result may be incomplete. # Afterward (e.g., when called from compute_follow()), it will be complete. # ------------------------------------------------------------------------- def _first(self, beta): # We are computing First(x1,x2,x3,...,xn) result = [] for x in beta: x_produces_empty = False # Add all the non-<empty> symbols of First[x] to the result. for f in self.First[x]: if f == '<empty>': x_produces_empty = True else: if f not in result: result.append(f) if x_produces_empty: # We have to consider the next x in beta, # i.e. stay in the loop. pass else: # We don't have to consider any further symbols in beta. break else: # There was no 'break' from the loop, # so x_produces_empty was true for all x in beta, # so beta produces empty as well. 
result.append('<empty>') return result # ------------------------------------------------------------------------- # compute_first() # # Compute the value of FIRST1(X) for all symbols # ------------------------------------------------------------------------- def compute_first(self): if self.First: return self.First # Terminals: for t in self.Terminals: self.First[t] = [t] self.First['$end'] = ['$end'] # Nonterminals: # Initialize to the empty set: for n in self.Nonterminals: self.First[n] = [] # Then propagate symbols until no change: while True: some_change = False for n in self.Nonterminals: for p in self.Prodnames[n]: for f in self._first(p.prod): if f not in self.First[n]: self.First[n].append(f) some_change = True if not some_change: break return self.First # --------------------------------------------------------------------- # compute_follow() # # Computes all of the follow sets for every non-terminal symbol. The # follow set is the set of all symbols that might follow a given # non-terminal. See the Dragon book, 2nd Ed. p. 189. # --------------------------------------------------------------------- def compute_follow(self, start=None): # If already computed, return the result if self.Follow: return self.Follow # If first sets not computed yet, do that first. if not self.First: self.compute_first() # Add '$end' to the follow list of the start symbol for k in self.Nonterminals: self.Follow[k] = [] if not start: start = self.Productions[1].name self.Follow[start] = ['$end'] while True: didadd = False for p in self.Productions[1:]: # Here is the production set for i, B in enumerate(p.prod): if B in self.Nonterminals: # Okay. We got a non-terminal in a production fst = self._first(p.prod[i+1:]) hasempty = False for f in fst: if f != '<empty>' and f not in self.Follow[B]: self.Follow[B].append(f) didadd = True if f == '<empty>': hasempty = True if hasempty or i == (len(p.prod)-1): # Add elements of follow(a) to follow(b) for f in self.Follow[p.name]: if f not in self.Follow[B]: self.Follow[B].append(f) didadd = True if not didadd: break return self.Follow # ----------------------------------------------------------------------------- # build_lritems() # # This function walks the list of productions and builds a complete set of the # LR items. The LR items are stored in two ways: First, they are uniquely # numbered and placed in the list _lritems. Second, a linked list of LR items # is built for each production. For example: # # E -> E PLUS E # # Creates the list # # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ] # ----------------------------------------------------------------------------- def build_lritems(self): for p in self.Productions: lastlri = p i = 0 lr_items = [] while True: if i > len(p): lri = None else: lri = LRItem(p, i) # Precompute the list of productions immediately following try: lri.lr_after = self.Prodnames[lri.prod[i+1]] except (IndexError, KeyError): lri.lr_after = [] try: lri.lr_before = lri.prod[i-1] except IndexError: lri.lr_before = None lastlri.lr_next = lri if not lri: break lr_items.append(lri) lastlri = lri i += 1 p.lr_items = lr_items # ----------------------------------------------------------------------------- # == Class LRTable == # # This basic class represents a basic table of LR parsing information. # Methods for generating the tables are not defined here. They are defined # in the derived class LRGeneratedTable. 
# ----------------------------------------------------------------------------- class VersionError(YaccError): pass class LRTable(object): def __init__(self): self.lr_action = None self.lr_goto = None self.lr_productions = None self.lr_method = None def read_table(self, module): if isinstance(module, types.ModuleType): parsetab = module else: exec('import %s' % module) parsetab = sys.modules[module] if parsetab._tabversion != __tabversion__: raise VersionError('yacc table file version is out of date') self.lr_action = parsetab._lr_action self.lr_goto = parsetab._lr_goto self.lr_productions = [] for p in parsetab._lr_productions: self.lr_productions.append(MiniProduction(*p)) self.lr_method = parsetab._lr_method return parsetab._lr_signature def read_pickle(self, filename): try: import cPickle as pickle except ImportError: import pickle if not os.path.exists(filename): raise ImportError in_f = open(filename, 'rb') tabversion = pickle.load(in_f) if tabversion != __tabversion__: raise VersionError('yacc table file version is out of date') self.lr_method = pickle.load(in_f) signature = pickle.load(in_f) self.lr_action = pickle.load(in_f) self.lr_goto = pickle.load(in_f) productions = pickle.load(in_f) self.lr_productions = [] for p in productions: self.lr_productions.append(MiniProduction(*p)) in_f.close() return signature # Bind all production function names to callable objects in pdict def bind_callables(self, pdict): for p in self.lr_productions: p.bind(pdict) # ----------------------------------------------------------------------------- # === LR Generator === # # The following classes and functions are used to generate LR parsing tables on # a grammar. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # digraph() # traverse() # # The following two functions are used to compute set valued functions # of the form: # # F(x) = F'(x) U U{F(y) | x R y} # # This is used to compute the values of Read() sets as well as FOLLOW sets # in LALR(1) generation. # # Inputs: X - An input set # R - A relation # FP - Set-valued function # ------------------------------------------------------------------------------ def digraph(X, R, FP): N = {} for x in X: N[x] = 0 stack = [] F = {} for x in X: if N[x] == 0: traverse(x, N, stack, F, X, R, FP) return F def traverse(x, N, stack, F, X, R, FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) # F(X) <- F'(x) rel = R(x) # Get y's related to x for y in rel: if N[y] == 0: traverse(y, N, stack, F, X, R, FP) N[x] = min(N[x], N[y]) for a in F.get(y, []): if a not in F[x]: F[x].append(a) if N[x] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() while element != x: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() class LALRError(YaccError): pass # ----------------------------------------------------------------------------- # == LRGeneratedTable == # # This class implements the LR table generation algorithm. 
There are no # public methods except for write() # ----------------------------------------------------------------------------- class LRGeneratedTable(LRTable): def __init__(self, grammar, method='LALR', log=None): if method not in ['SLR', 'LALR']: raise LALRError('Unsupported method %s' % method) self.grammar = grammar self.lr_method = method # Set up the logger if not log: log = NullLogger() self.log = log # Internal attributes self.lr_action = {} # Action table self.lr_goto = {} # Goto table self.lr_productions = grammar.Productions # Copy of grammar Production array self.lr_goto_cache = {} # Cache of computed gotos self.lr0_cidhash = {} # Cache of closures self._add_count = 0 # Internal counter used to detect cycles # Diagonistic information filled in by the table generator self.sr_conflict = 0 self.rr_conflict = 0 self.conflicts = [] # List of conflicts self.sr_conflicts = [] self.rr_conflicts = [] # Build the tables self.grammar.build_lritems() self.grammar.compute_first() self.grammar.compute_follow() self.lr_parse_table() # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. def lr0_closure(self, I): self._add_count += 1 # Add everything in I to J J = I[:] didadd = True while didadd: didadd = False for j in J: for x in j.lr_after: if getattr(x, 'lr0_added', 0) == self._add_count: continue # Add B --> .G to J J.append(x.lr_next) x.lr0_added = self._add_count didadd = True return J # Compute the LR(0) goto function goto(I,X) where I is a set # of LR(0) items and X is a grammar symbol. This function is written # in a way that guarantees uniqueness of the generated goto sets # (i.e. the same goto set will never be returned as two different Python # objects). With uniqueness, we can later do fast set comparisons using # id(obj) instead of element-wise comparison. def lr0_goto(self, I, x): # First we look for a previously cached entry g = self.lr_goto_cache.get((id(I), x)) if g: return g # Now we generate the goto set in a way that guarantees uniqueness # of the result s = self.lr_goto_cache.get(x) if not s: s = {} self.lr_goto_cache[x] = s gs = [] for p in I: n = p.lr_next if n and n.lr_before == x: s1 = s.get(id(n)) if not s1: s1 = {} s[id(n)] = s1 gs.append(n) s = s1 g = s.get('$end') if not g: if gs: g = self.lr0_closure(gs) s['$end'] = g else: s['$end'] = gs self.lr_goto_cache[(id(I), x)] = g return g # Compute the LR(0) sets of item function def lr0_items(self): C = [self.lr0_closure([self.grammar.Productions[0].lr_next])] i = 0 for I in C: self.lr0_cidhash[id(I)] = i i += 1 # Loop over the items in C and each grammar symbols i = 0 while i < len(C): I = C[i] i += 1 # Collect all of the symbols that could possibly be in the goto(I,X) sets asyms = {} for ii in I: for s in ii.usyms: asyms[s] = None for x in asyms: g = self.lr0_goto(I, x) if not g or id(g) in self.lr0_cidhash: continue self.lr0_cidhash[id(g)] = len(C) C.append(g) return C # ----------------------------------------------------------------------------- # ==== LALR(1) Parsing ==== # # LALR(1) parsing is almost exactly the same as SLR except that instead of # relying upon Follow() sets when performing reductions, a more selective # lookahead set that incorporates the state of the LR(0) machine is utilized. # Thus, we mainly just have to focus on calculating the lookahead sets. # # The method used here is due to DeRemer and Pennelo (1982). # # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1) # Lookahead Sets", ACM Transactions on Programming Languages and Systems, # Vol. 4, No. 
4, Oct. 1982, pp. 615-649 # # Further details can also be found in: # # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", # McGraw-Hill Book Company, (1985). # # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # compute_nullable_nonterminals() # # Creates a dictionary containing all of the non-terminals that might produce # an empty production. # ----------------------------------------------------------------------------- def compute_nullable_nonterminals(self): nullable = set() num_nullable = 0 while True: for p in self.grammar.Productions[1:]: if p.len == 0: nullable.add(p.name) continue for t in p.prod: if t not in nullable: break else: nullable.add(p.name) if len(nullable) == num_nullable: break num_nullable = len(nullable) return nullable # ----------------------------------------------------------------------------- # find_nonterminal_trans(C) # # Given a set of LR(0) items, this functions finds all of the non-terminal # transitions. These are transitions in which a dot appears immediately before # a non-terminal. Returns a list of tuples of the form (state,N) where state # is the state number and N is the nonterminal symbol. # # The input C is the set of LR(0) items. # ----------------------------------------------------------------------------- def find_nonterminal_transitions(self, C): trans = [] for stateno, state in enumerate(C): for p in state: if p.lr_index < p.len - 1: t = (stateno, p.prod[p.lr_index+1]) if t[1] in self.grammar.Nonterminals: if t not in trans: trans.append(t) return trans # ----------------------------------------------------------------------------- # dr_relation() # # Computes the DR(p,A) relationships for non-terminal transitions. The input # is a tuple (state,N) where state is a number and N is a nonterminal symbol. # # Returns a list of terminals. # ----------------------------------------------------------------------------- def dr_relation(self, C, trans, nullable): dr_set = {} state, N = trans terms = [] g = self.lr0_goto(C[state], N) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index+1] if a in self.grammar.Terminals: if a not in terms: terms.append(a) # This extra bit is to handle the start state if state == 0 and N == self.grammar.Productions[0].prod[0]: terms.append('$end') return terms # ----------------------------------------------------------------------------- # reads_relation() # # Computes the READS() relation (p,A) READS (t,C). # ----------------------------------------------------------------------------- def reads_relation(self, C, trans, empty): # Look for empty transitions rel = [] state, N = trans g = self.lr0_goto(C[state], N) j = self.lr0_cidhash.get(id(g), -1) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index + 1] if a in empty: rel.append((j, a)) return rel # ----------------------------------------------------------------------------- # compute_lookback_includes() # # Determines the lookback and includes relations # # LOOKBACK: # # This relation is determined by running the LR(0) state machine forward. # For example, starting with a production "N : . A B C", we run it forward # to obtain "N : A B C ." We then build a relationship between this final # state and the starting state. These relationships are stored in a dictionary # lookdict. # # INCLUDES: # # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). 
# # This relation is used to determine non-terminal transitions that occur # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) # if the following holds: # # B -> LAT, where T -> epsilon and p' -L-> p # # L is essentially a prefix (which may be empty), T is a suffix that must be # able to derive an empty string. State p' must lead to state p with the string L. # # ----------------------------------------------------------------------------- def compute_lookback_includes(self, C, trans, nullable): lookdict = {} # Dictionary of lookback relations includedict = {} # Dictionary of include relations # Make a dictionary of non-terminal transitions dtrans = {} for t in trans: dtrans[t] = 1 # Loop over all transitions and compute lookbacks and includes for state, N in trans: lookb = [] includes = [] for p in C[state]: if p.name != N: continue # Okay, we have a name match. We now follow the production all the way # through the state machine until we get the . on the right hand side lr_index = p.lr_index j = state while lr_index < p.len - 1: lr_index = lr_index + 1 t = p.prod[lr_index] # Check to see if this symbol and state are a non-terminal transition if (j, t) in dtrans: # Yes. Okay, there is some chance that this is an includes relation # the only way to know for certain is whether the rest of the # production derives empty li = lr_index + 1 while li < p.len: if p.prod[li] in self.grammar.Terminals: break # No forget it if p.prod[li] not in nullable: break li = li + 1 else: # Appears to be a relation between (j,t) and (state,N) includes.append((j, t)) g = self.lr0_goto(C[j], t) # Go to next set j = self.lr0_cidhash.get(id(g), -1) # Go to next state # When we get here, j is the final state, now we have to locate the production for r in C[j]: if r.name != p.name: continue if r.len != p.len: continue i = 0 # This look is comparing a production ". A B C" with "A B C ." while i < r.lr_index: if r.prod[i] != p.prod[i+1]: break i = i + 1 else: lookb.append((j, r)) for i in includes: if i not in includedict: includedict[i] = [] includedict[i].append((state, N)) lookdict[(state, N)] = lookb return lookdict, includedict # ----------------------------------------------------------------------------- # compute_read_sets() # # Given a set of LR(0) items, this function computes the read sets. 
# # Inputs: C = Set of LR(0) items # ntrans = Set of nonterminal transitions # nullable = Set of empty transitions # # Returns a set containing the read sets # ----------------------------------------------------------------------------- def compute_read_sets(self, C, ntrans, nullable): FP = lambda x: self.dr_relation(C, x, nullable) R = lambda x: self.reads_relation(C, x, nullable) F = digraph(ntrans, R, FP) return F # ----------------------------------------------------------------------------- # compute_follow_sets() # # Given a set of LR(0) items, a set of non-terminal transitions, a readset, # and an include set, this function computes the follow sets # # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} # # Inputs: # ntrans = Set of nonterminal transitions # readsets = Readset (previously computed) # inclsets = Include sets (previously computed) # # Returns a set containing the follow sets # ----------------------------------------------------------------------------- def compute_follow_sets(self, ntrans, readsets, inclsets): FP = lambda x: readsets[x] R = lambda x: inclsets.get(x, []) F = digraph(ntrans, R, FP) return F # ----------------------------------------------------------------------------- # add_lookaheads() # # Attaches the lookahead symbols to grammar rules. # # Inputs: lookbacks - Set of lookback relations # followset - Computed follow set # # This function directly attaches the lookaheads to productions contained # in the lookbacks set # ----------------------------------------------------------------------------- def add_lookaheads(self, lookbacks, followset): for trans, lb in lookbacks.items(): # Loop over productions in lookback for state, p in lb: if state not in p.lookaheads: p.lookaheads[state] = [] f = followset.get(trans, []) for a in f: if a not in p.lookaheads[state]: p.lookaheads[state].append(a) # ----------------------------------------------------------------------------- # add_lalr_lookaheads() # # This function does all of the work of adding lookahead information for use # with LALR parsing # ----------------------------------------------------------------------------- def add_lalr_lookaheads(self, C): # Determine all of the nullable nonterminals nullable = self.compute_nullable_nonterminals() # Find all non-terminal transitions trans = self.find_nonterminal_transitions(C) # Compute read sets readsets = self.compute_read_sets(C, trans, nullable) # Compute lookback/includes relations lookd, included = self.compute_lookback_includes(C, trans, nullable) # Compute LALR FOLLOW sets followsets = self.compute_follow_sets(trans, readsets, included) # Add all of the lookaheads self.add_lookaheads(lookd, followsets) # ----------------------------------------------------------------------------- # lr_parse_table() # # This function constructs the parse tables for SLR or LALR # ----------------------------------------------------------------------------- def lr_parse_table(self): Productions = self.grammar.Productions Precedence = self.grammar.Precedence goto = self.lr_goto # Goto array action = self.lr_action # Action array log = self.log # Logger for output actionp = {} # Action production array (temporary) log.info('Parsing method: %s', self.lr_method) # Step 1: Construct C = { I0, I1, ... 
IN}, collection of LR(0) items # This determines the number of states C = self.lr0_items() if self.lr_method == 'LALR': self.add_lalr_lookaheads(C) # Build the parser table, state by state st = 0 for I in C: # Loop over each production in I actlist = [] # List of actions st_action = {} st_actionp = {} st_goto = {} log.info('') log.info('state %d', st) log.info('') for p in I: log.info(' (%d) %s', p.number, p) log.info('') for p in I: if p.len == p.lr_index + 1: if p.name == "S'": # Start symbol. Accept! st_action['$end'] = 0 st_actionp['$end'] = p else: # We are at the end of a production. Reduce! if self.lr_method == 'LALR': laheads = p.lookaheads[st] else: laheads = self.grammar.Follow[p.name] for a in laheads: actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) r = st_action.get(a) if r is not None: # Whoa. Have a shift/reduce or reduce/reduce conflict if r > 0: # Need to decide on shift or reduce here # By default we favor shifting. Need to add # some precedence rules here. sprec, slevel = Productions[st_actionp[a].number].prec rprec, rlevel = Precedence.get(a, ('right', 0)) if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): # We really need to reduce here. st_action[a] = -p.number st_actionp[a] = p if not slevel and not rlevel: log.info(' ! shift/reduce conflict for %s resolved as reduce', a) self.sr_conflicts.append((st, a, 'reduce')) Productions[p.number].reduced += 1 elif (slevel == rlevel) and (rprec == 'nonassoc'): st_action[a] = None else: # Hmmm. Guess we'll keep the shift if not rlevel: log.info(' ! shift/reduce conflict for %s resolved as shift', a) self.sr_conflicts.append((st, a, 'shift')) elif r < 0: # Reduce/reduce conflict. In this case, we favor the rule # that was defined first in the grammar file oldp = Productions[-r] pp = Productions[p.number] if oldp.line > pp.line: st_action[a] = -p.number st_actionp[a] = p chosenp, rejectp = pp, oldp Productions[p.number].reduced += 1 Productions[oldp.number].reduced -= 1 else: chosenp, rejectp = oldp, pp self.rr_conflicts.append((st, chosenp, rejectp)) log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', a, st_actionp[a].number, st_actionp[a]) else: raise LALRError('Unknown conflict in state %d' % st) else: st_action[a] = -p.number st_actionp[a] = p Productions[p.number].reduced += 1 else: i = p.lr_index a = p.prod[i+1] # Get symbol right after the "." if a in self.grammar.Terminals: g = self.lr0_goto(I, a) j = self.lr0_cidhash.get(id(g), -1) if j >= 0: # We are in a shift state actlist.append((a, p, 'shift and go to state %d' % j)) r = st_action.get(a) if r is not None: # Whoa have a shift/reduce or shift/shift conflict if r > 0: if r != j: raise LALRError('Shift/shift conflict in state %d' % st) elif r < 0: # Do a precedence check. # - if precedence of reduce rule is higher, we reduce. # - if precedence of reduce is same and left assoc, we reduce. # - otherwise we shift rprec, rlevel = Productions[st_actionp[a].number].prec sprec, slevel = Precedence.get(a, ('right', 0)) if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): # We decide to shift here... highest precedence to shift Productions[st_actionp[a].number].reduced -= 1 st_action[a] = j st_actionp[a] = p if not rlevel: log.info(' ! shift/reduce conflict for %s resolved as shift', a) self.sr_conflicts.append((st, a, 'shift')) elif (slevel == rlevel) and (rprec == 'nonassoc'): st_action[a] = None else: # Hmmm. Guess we'll keep the reduce if not slevel and not rlevel: log.info(' ! 
shift/reduce conflict for %s resolved as reduce', a) self.sr_conflicts.append((st, a, 'reduce')) else: raise LALRError('Unknown conflict in state %d' % st) else: st_action[a] = j st_actionp[a] = p # Print the actions associated with each terminal _actprint = {} for a, p, m in actlist: if a in st_action: if p is st_actionp[a]: log.info(' %-15s %s', a, m) _actprint[(a, m)] = 1 log.info('') # Print the actions that were not used. (debugging) not_used = 0 for a, p, m in actlist: if a in st_action: if p is not st_actionp[a]: if not (a, m) in _actprint: log.debug(' ! %-15s [ %s ]', a, m) not_used = 1 _actprint[(a, m)] = 1 if not_used: log.debug('') # Construct the goto table for this state nkeys = {} for ii in I: for s in ii.usyms: if s in self.grammar.Nonterminals: nkeys[s] = None for n in nkeys: g = self.lr0_goto(I, n) j = self.lr0_cidhash.get(id(g), -1) if j >= 0: st_goto[n] = j log.info(' %-30s shift and go to state %d', n, j) action[st] = st_action actionp[st] = st_actionp goto[st] = st_goto st += 1 # ----------------------------------------------------------------------------- # write() # # This function writes the LR parsing tables to a file # ----------------------------------------------------------------------------- def write_table(self, tabmodule, outputdir='', signature=''): if isinstance(tabmodule, types.ModuleType): raise IOError("Won't overwrite existing tabmodule") basemodulename = tabmodule.split('.')[-1] filename = os.path.join(outputdir, basemodulename) + '.py' try: f = open(filename, 'w') f.write(''' # %s # This file is automatically generated. Do not edit. _tabversion = %r _lr_method = %r _lr_signature = %r ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) # Change smaller to 0 to go back to original tables smaller = 1 # Factor out names to try and make smaller if smaller: items = {} for s, nd in self.lr_action.items(): for name, v in nd.items(): i = items.get(name) if not i: i = ([], []) items[name] = i i[0].append(s) i[1].append(v) f.write('\n_lr_action_items = {') for k, v in items.items(): f.write('%r:([' % k) for i in v[0]: f.write('%r,' % i) f.write('],[') for i in v[1]: f.write('%r,' % i) f.write(']),') f.write('}\n') f.write(''' _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items ''') else: f.write('\n_lr_action = { ') for k, v in self.lr_action.items(): f.write('(%r,%r):%r,' % (k[0], k[1], v)) f.write('}\n') if smaller: # Factor out names to try and make smaller items = {} for s, nd in self.lr_goto.items(): for name, v in nd.items(): i = items.get(name) if not i: i = ([], []) items[name] = i i[0].append(s) i[1].append(v) f.write('\n_lr_goto_items = {') for k, v in items.items(): f.write('%r:([' % k) for i in v[0]: f.write('%r,' % i) f.write('],[') for i in v[1]: f.write('%r,' % i) f.write(']),') f.write('}\n') f.write(''' _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items ''') else: f.write('\n_lr_goto = { ') for k, v in self.lr_goto.items(): f.write('(%r,%r):%r,' % (k[0], k[1], v)) f.write('}\n') # Write production table f.write('_lr_productions = [\n') for p in self.lr_productions: if p.func: f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) else: f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) f.write(']\n') f.close() except 
IOError as e: raise # ----------------------------------------------------------------------------- # pickle_table() # # This function pickles the LR parsing tables to a supplied file object # ----------------------------------------------------------------------------- def pickle_table(self, filename, signature=''): try: import cPickle as pickle except ImportError: import pickle with open(filename, 'wb') as outf: pickle.dump(__tabversion__, outf, pickle_protocol) pickle.dump(self.lr_method, outf, pickle_protocol) pickle.dump(signature, outf, pickle_protocol) pickle.dump(self.lr_action, outf, pickle_protocol) pickle.dump(self.lr_goto, outf, pickle_protocol) outp = [] for p in self.lr_productions: if p.func: outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) else: outp.append((str(p), p.name, p.len, None, None, None)) pickle.dump(outp, outf, pickle_protocol) # ----------------------------------------------------------------------------- # === INTROSPECTION === # # The following functions and classes are used to implement the PLY # introspection features followed by the yacc() function itself. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. This is used to get the environment # associated with the yacc() call if none was provided. # ----------------------------------------------------------------------------- def get_caller_module_dict(levels): f = sys._getframe(levels) ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # parse_grammar() # # This takes a raw grammar rule string and parses it into production data # ----------------------------------------------------------------------------- def parse_grammar(doc, file, line): grammar = [] # Split the doc string into lines pstrings = doc.splitlines() lastp = None dline = line for ps in pstrings: dline += 1 p = ps.split() if not p: continue try: if p[0] == '|': # This is a continuation of a previous rule if not lastp: raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) prodname = lastp syms = p[1:] else: prodname = p[0] lastp = prodname syms = p[2:] assign = p[1] if assign != ':' and assign != '::=': raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) grammar.append((file, dline, prodname, syms)) except SyntaxError: raise except Exception: raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) return grammar # ----------------------------------------------------------------------------- # ParserReflect() # # This class represents information extracted for building a parser including # start symbol, error function, tokens, precedence list, action functions, # etc. 
# ----------------------------------------------------------------------------- class ParserReflect(object): def __init__(self, pdict, log=None): self.pdict = pdict self.start = None self.error_func = None self.tokens = None self.modules = set() self.grammar = [] self.error = False if log is None: self.log = PlyLogger(sys.stderr) else: self.log = log # Get all of the basic information def get_all(self): self.get_start() self.get_error_func() self.get_tokens() self.get_precedence() self.get_pfunctions() # Validate all of the information def validate_all(self): self.validate_start() self.validate_error_func() self.validate_tokens() self.validate_precedence() self.validate_pfunctions() self.validate_modules() return self.error # Compute a signature over the grammar def signature(self): try: from hashlib import md5 except ImportError: from md5 import md5 try: sig = md5() if self.start: sig.update(self.start.encode('latin-1')) if self.prec: sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1')) if self.tokens: sig.update(' '.join(self.tokens).encode('latin-1')) for f in self.pfuncs: if f[3]: sig.update(f[3].encode('latin-1')) except (TypeError, ValueError): pass digest = base64.b16encode(sig.digest()) if sys.version_info[0] >= 3: digest = digest.decode('latin-1') return digest # ----------------------------------------------------------------------------- # validate_modules() # # This method checks to see if there are duplicated p_rulename() functions # in the parser module file. Without this function, it is really easy for # users to make mistakes by cutting and pasting code fragments (and it's a real # bugger to try and figure out why the resulting parser doesn't work). Therefore, # we just do a little regular expression pattern matching of def statements # to try and detect duplicates. # ----------------------------------------------------------------------------- def validate_modules(self): # Match def p_funcname( fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') for module in self.modules: lines, linen = inspect.getsourcelines(module) counthash = {} for linen, line in enumerate(lines): linen += 1 m = fre.match(line) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: filename = inspect.getsourcefile(module) self.log.warning('%s:%d: Function %s redefined. 
Previously defined on line %d', filename, linen, name, prev) # Get the start symbol def get_start(self): self.start = self.pdict.get('start') # Validate the start symbol def validate_start(self): if self.start is not None: if not isinstance(self.start, string_types): self.log.error("'start' must be a string") # Look for error handler def get_error_func(self): self.error_func = self.pdict.get('p_error') # Validate the error function def validate_error_func(self): if self.error_func: if isinstance(self.error_func, types.FunctionType): ismethod = 0 elif isinstance(self.error_func, types.MethodType): ismethod = 1 else: self.log.error("'p_error' defined, but is not a function or method") self.error = True return eline = self.error_func.__code__.co_firstlineno efile = self.error_func.__code__.co_filename module = inspect.getmodule(self.error_func) self.modules.add(module) argcount = self.error_func.__code__.co_argcount - ismethod if argcount != 1: self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) self.error = True # Get the tokens map def get_tokens(self): tokens = self.pdict.get('tokens') if not tokens: self.log.error('No token list is defined') self.error = True return if not isinstance(tokens, (list, tuple)): self.log.error('tokens must be a list or tuple') self.error = True return if not tokens: self.log.error('tokens is empty') self.error = True return self.tokens = tokens # Validate the tokens def validate_tokens(self): # Validate the tokens. if 'error' in self.tokens: self.log.error("Illegal token name 'error'. Is a reserved word") self.error = True return terminals = set() for n in self.tokens: if n in terminals: self.log.warning('Token %r multiply defined', n) terminals.add(n) # Get the precedence map (if any) def get_precedence(self): self.prec = self.pdict.get('precedence') # Validate and parse the precedence map def validate_precedence(self): preclist = [] if self.prec: if not isinstance(self.prec, (list, tuple)): self.log.error('precedence must be a list or tuple') self.error = True return for level, p in enumerate(self.prec): if not isinstance(p, (list, tuple)): self.log.error('Bad precedence table') self.error = True return if len(p) < 2: self.log.error('Malformed precedence entry %s. 
Must be (assoc, term, ..., term)', p) self.error = True return assoc = p[0] if not isinstance(assoc, string_types): self.log.error('precedence associativity must be a string') self.error = True return for term in p[1:]: if not isinstance(term, string_types): self.log.error('precedence items must be strings') self.error = True return preclist.append((term, assoc, level+1)) self.preclist = preclist # Get all p_functions from the grammar def get_pfunctions(self): p_functions = [] for name, item in self.pdict.items(): if not name.startswith('p_') or name == 'p_error': continue if isinstance(item, (types.FunctionType, types.MethodType)): line = item.__code__.co_firstlineno module = inspect.getmodule(item) p_functions.append((line, module, name, item.__doc__)) # Sort all of the actions by line number; make sure to stringify # modules to make them sortable, since `line` may not uniquely sort all # p functions p_functions.sort(key=lambda p_function: ( p_function[0], str(p_function[1]), p_function[2], p_function[3])) self.pfuncs = p_functions # Validate all of the p_functions def validate_pfunctions(self): grammar = [] # Check for non-empty symbols if len(self.pfuncs) == 0: self.log.error('no rules of the form p_rulename are defined') self.error = True return for line, module, name, doc in self.pfuncs: file = inspect.getsourcefile(module) func = self.pdict[name] if isinstance(func, types.MethodType): reqargs = 2 else: reqargs = 1 if func.__code__.co_argcount > reqargs: self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) self.error = True elif func.__code__.co_argcount < reqargs: self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) self.error = True elif not func.__doc__: self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', file, line, func.__name__) else: try: parsed_g = parse_grammar(doc, file, line) for g in parsed_g: grammar.append((name, g)) except SyntaxError as e: self.log.error(str(e)) self.error = True # Looks like a valid grammar rule # Mark the file in which defined. self.modules.add(module) # Secondary validation step that looks for p_ definitions that are not functions # or functions that look like they might be grammar rules. 
for n, v in self.pdict.items(): if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): continue if n.startswith('t_'): continue if n.startswith('p_') and n != 'p_error': self.log.warning('%r not defined as a function', n) if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)): if v.__doc__: try: doc = v.__doc__.split(' ') if doc[1] == ':': self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', v.__code__.co_filename, v.__code__.co_firstlineno, n) except IndexError: pass self.grammar = grammar # ----------------------------------------------------------------------------- # yacc(module) # # Build a parser # ----------------------------------------------------------------------------- def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file, outputdir=None, debuglog=None, errorlog=None, picklefile=None): if tabmodule is None: tabmodule = tab_module # Reference to the parsing method of the last built parser global parse # If pickling is enabled, table files are not created if picklefile: write_tables = 0 if errorlog is None: errorlog = PlyLogger(sys.stderr) # Get the module dictionary used for the parser if module: _items = [(k, getattr(module, k)) for k in dir(module)] pdict = dict(_items) # If no __file__ attribute is available, try to obtain it from the __module__ instead if '__file__' not in pdict: pdict['__file__'] = sys.modules[pdict['__module__']].__file__ else: pdict = get_caller_module_dict(2) if outputdir is None: # If no output directory is set, the location of the output files # is determined according to the following rules: # - If tabmodule specifies a package, files go into that package directory # - Otherwise, files go in the same directory as the specifying module if isinstance(tabmodule, types.ModuleType): srcfile = tabmodule.__file__ else: if '.' not in tabmodule: srcfile = pdict['__file__'] else: parts = tabmodule.split('.') pkgname = '.'.join(parts[:-1]) exec('import %s' % pkgname) srcfile = getattr(sys.modules[pkgname], '__file__', '') outputdir = os.path.dirname(srcfile) # Determine if the module is package of a package or not. # If so, fix the tabmodule setting so that tables load correctly pkg = pdict.get('__package__') if pkg and isinstance(tabmodule, str): if '.' not in tabmodule: tabmodule = pkg + '.' + tabmodule # Set start symbol if it's specified directly using an argument if start is not None: pdict['start'] = start # Collect parser information from the dictionary pinfo = ParserReflect(pdict, log=errorlog) pinfo.get_all() if pinfo.error: raise YaccError('Unable to build parser') # Check signature against table files (if any) signature = pinfo.signature() # Read the tables try: lr = LRTable() if picklefile: read_signature = lr.read_pickle(picklefile) else: read_signature = lr.read_table(tabmodule) if optimize or (read_signature == signature): try: lr.bind_callables(pinfo.pdict) parser = LRParser(lr, pinfo.error_func) parse = parser.parse return parser except Exception as e: errorlog.warning('There was a problem loading the table file: %r', e) except VersionError as e: errorlog.warning(str(e)) except ImportError: pass if debuglog is None: if debug: try: debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) except IOError as e: errorlog.warning("Couldn't open %r. 
%s" % (debugfile, e)) debuglog = NullLogger() else: debuglog = NullLogger() debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) errors = False # Validate the parser information if pinfo.validate_all(): raise YaccError('Unable to build parser') if not pinfo.error_func: errorlog.warning('no p_error() function is defined') # Create a grammar object grammar = Grammar(pinfo.tokens) # Set precedence level for terminals for term, assoc, level in pinfo.preclist: try: grammar.set_precedence(term, assoc, level) except GrammarError as e: errorlog.warning('%s', e) # Add productions to the grammar for funcname, gram in pinfo.grammar: file, line, prodname, syms = gram try: grammar.add_production(prodname, syms, funcname, file, line) except GrammarError as e: errorlog.error('%s', e) errors = True # Set the grammar start symbols try: if start is None: grammar.set_start(pinfo.start) else: grammar.set_start(start) except GrammarError as e: errorlog.error(str(e)) errors = True if errors: raise YaccError('Unable to build parser') # Verify the grammar structure undefined_symbols = grammar.undefined_symbols() for sym, prod in undefined_symbols: errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) errors = True unused_terminals = grammar.unused_terminals() if unused_terminals: debuglog.info('') debuglog.info('Unused terminals:') debuglog.info('') for term in unused_terminals: errorlog.warning('Token %r defined, but not used', term) debuglog.info(' %s', term) # Print out all productions to the debug log if debug: debuglog.info('') debuglog.info('Grammar') debuglog.info('') for n, p in enumerate(grammar.Productions): debuglog.info('Rule %-5d %s', n, p) # Find unused non-terminals unused_rules = grammar.unused_rules() for prod in unused_rules: errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) if len(unused_terminals) == 1: errorlog.warning('There is 1 unused token') if len(unused_terminals) > 1: errorlog.warning('There are %d unused tokens', len(unused_terminals)) if len(unused_rules) == 1: errorlog.warning('There is 1 unused rule') if len(unused_rules) > 1: errorlog.warning('There are %d unused rules', len(unused_rules)) if debug: debuglog.info('') debuglog.info('Terminals, with rules where they appear') debuglog.info('') terms = list(grammar.Terminals) terms.sort() for term in terms: debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) debuglog.info('') debuglog.info('Nonterminals, with rules where they appear') debuglog.info('') nonterms = list(grammar.Nonterminals) nonterms.sort() for nonterm in nonterms: debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) debuglog.info('') if check_recursion: unreachable = grammar.find_unreachable() for u in unreachable: errorlog.warning('Symbol %r is unreachable', u) infinite = grammar.infinite_cycles() for inf in infinite: errorlog.error('Infinite recursion detected for symbol %r', inf) errors = True unused_prec = grammar.unused_precedence() for term, assoc in unused_prec: errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) errors = True if errors: raise YaccError('Unable to build parser') # Run the LRGeneratedTable on the grammar if debug: errorlog.debug('Generating %s tables', method) lr = LRGeneratedTable(grammar, method, debuglog) if debug: num_sr = len(lr.sr_conflicts) # Report shift/reduce and reduce/reduce conflicts if num_sr == 1: 
errorlog.warning('1 shift/reduce conflict') elif num_sr > 1: errorlog.warning('%d shift/reduce conflicts', num_sr) num_rr = len(lr.rr_conflicts) if num_rr == 1: errorlog.warning('1 reduce/reduce conflict') elif num_rr > 1: errorlog.warning('%d reduce/reduce conflicts', num_rr) # Write out conflicts to the output file if debug and (lr.sr_conflicts or lr.rr_conflicts): debuglog.warning('') debuglog.warning('Conflicts:') debuglog.warning('') for state, tok, resolution in lr.sr_conflicts: debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) already_reported = set() for state, rule, rejected in lr.rr_conflicts: if (state, id(rule), id(rejected)) in already_reported: continue debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) debuglog.warning('rejected rule (%s) in state %d', rejected, state) errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) errorlog.warning('rejected rule (%s) in state %d', rejected, state) already_reported.add((state, id(rule), id(rejected))) warned_never = [] for state, rule, rejected in lr.rr_conflicts: if not rejected.reduced and (rejected not in warned_never): debuglog.warning('Rule (%s) is never reduced', rejected) errorlog.warning('Rule (%s) is never reduced', rejected) warned_never.append(rejected) # Write the table file if requested if write_tables: try: lr.write_table(tabmodule, outputdir, signature) except IOError as e: errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) # Write a pickled version of the tables if picklefile: try: lr.pickle_table(picklefile, signature) except IOError as e: errorlog.warning("Couldn't create %r. %s" % (picklefile, e)) # Build the parser lr.bind_callables(pinfo.pdict) parser = LRParser(lr, pinfo.error_func) parse = parser.parse return parser
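# Usage sketch (not from the original module): a minimal, hedged example of how
# the yacc() entry point above is normally driven. Grammar rules are plain
# functions named p_*, with the BNF in their docstrings, exactly as
# ParserReflect expects; the token list and lexer are assumed to come from a
# separate ply.lex module, and the module name 'calclex' below is hypothetical.
import ply.yacc as yacc
from calclex import tokens  # hypothetical lexer module that defines `tokens`

precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
)

def p_expression_binop(p):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    if p[2] == '+':
        p[0] = p[1] + p[3]
    elif p[2] == '-':
        p[0] = p[1] - p[3]
    elif p[2] == '*':
        p[0] = p[1] * p[3]
    else:
        p[0] = p[1] / p[3]

def p_expression_number(p):
    'expression : NUMBER'
    p[0] = p[1]

def p_error(p):
    print('Syntax error at %r' % (p,))

# yacc.yacc() runs the pipeline shown above: ParserReflect gathers the p_*
# functions, Grammar and LRGeneratedTable build the LALR tables, and
# write_table() caches them in a parsetab module for later runs.
parser = yacc.yacc()
result = parser.parse('3 + 4 * 5')  # relies on the hypothetical calclex lexer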
apache-2.0
40223137/w17w17
static/Brython3.1.0-20150301-090019/Lib/fractions.py
722
23203
# Originally contributed by Sjoerd Mullender. # Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>. """Fraction, infinite-precision, real numbers.""" from decimal import Decimal import math import numbers import operator import re import sys __all__ = ['Fraction', 'gcd'] def gcd(a, b): """Calculate the Greatest Common Divisor of a and b. Unless b==0, the result will have the same sign as b (so that when b is divided by it, the result comes out positive). """ while b: a, b = b, a%b return a # Constants related to the hash implementation; hash(x) is based # on the reduction of x modulo the prime _PyHASH_MODULUS. _PyHASH_MODULUS = sys.hash_info.modulus # Value to be used for rationals that reduce to infinity modulo # _PyHASH_MODULUS. _PyHASH_INF = sys.hash_info.inf _RATIONAL_FORMAT = re.compile(r""" \A\s* # optional whitespace at the start, then (?P<sign>[-+]?) # an optional sign, then (?=\d|\.\d) # lookahead for digit or .digit (?P<num>\d*) # numerator (possibly empty) (?: # followed by (?:/(?P<denom>\d+))? # an optional denominator | # or (?:\.(?P<decimal>\d*))? # an optional fractional part (?:E(?P<exp>[-+]?\d+))? # and optional exponent ) \s*\Z # and optional whitespace to finish """, re.VERBOSE | re.IGNORECASE) class Fraction(numbers.Rational): """This class implements rational numbers. In the two-argument form of the constructor, Fraction(8, 6) will produce a rational number equivalent to 4/3. Both arguments must be Rational. The numerator defaults to 0 and the denominator defaults to 1 so that Fraction(3) == 3 and Fraction() == 0. Fractions can also be constructed from: - numeric strings similar to those accepted by the float constructor (for example, '-2.3' or '1e10') - strings of the form '123/456' - float and Decimal instances - other Rational instances (including integers) """ __slots__ = ('_numerator', '_denominator') # We're immutable, so use __new__ not __init__ def __new__(cls, numerator=0, denominator=None): """Constructs a Rational. Takes a string like '3/2' or '1.5', another Rational instance, a numerator/denominator pair, or a float. Examples -------- >>> Fraction(10, -8) Fraction(-5, 4) >>> Fraction(Fraction(1, 7), 5) Fraction(1, 35) >>> Fraction(Fraction(1, 7), Fraction(2, 3)) Fraction(3, 14) >>> Fraction('314') Fraction(314, 1) >>> Fraction('-35/4') Fraction(-35, 4) >>> Fraction('3.1415') # conversion from numeric string Fraction(6283, 2000) >>> Fraction('-47e-2') # string may include a decimal exponent Fraction(-47, 100) >>> Fraction(1.47) # direct construction from float (exact conversion) Fraction(6620291452234629, 4503599627370496) >>> Fraction(2.25) Fraction(9, 4) >>> Fraction(Decimal('1.47')) Fraction(147, 100) """ self = super(Fraction, cls).__new__(cls) if denominator is None: if isinstance(numerator, numbers.Rational): self._numerator = numerator.numerator self._denominator = numerator.denominator return self elif isinstance(numerator, float): # Exact conversion from float value = Fraction.from_float(numerator) self._numerator = value._numerator self._denominator = value._denominator return self elif isinstance(numerator, Decimal): value = Fraction.from_decimal(numerator) self._numerator = value._numerator self._denominator = value._denominator return self elif isinstance(numerator, str): # Handle construction from strings. 
m = _RATIONAL_FORMAT.match(numerator) if m is None: raise ValueError('Invalid literal for Fraction: %r' % numerator) numerator = int(m.group('num') or '0') denom = m.group('denom') if denom: denominator = int(denom) else: denominator = 1 decimal = m.group('decimal') if decimal: scale = 10**len(decimal) numerator = numerator * scale + int(decimal) denominator *= scale exp = m.group('exp') if exp: exp = int(exp) if exp >= 0: numerator *= 10**exp else: denominator *= 10**-exp if m.group('sign') == '-': numerator = -numerator else: raise TypeError("argument should be a string " "or a Rational instance") elif (isinstance(numerator, numbers.Rational) and isinstance(denominator, numbers.Rational)): numerator, denominator = ( numerator.numerator * denominator.denominator, denominator.numerator * numerator.denominator ) else: raise TypeError("both arguments should be " "Rational instances") if denominator == 0: raise ZeroDivisionError('Fraction(%s, 0)' % numerator) g = gcd(numerator, denominator) self._numerator = numerator // g self._denominator = denominator // g return self @classmethod def from_float(cls, f): """Converts a finite float to a rational number, exactly. Beware that Fraction.from_float(0.3) != Fraction(3, 10). """ if isinstance(f, numbers.Integral): return cls(f) elif not isinstance(f, float): raise TypeError("%s.from_float() only takes floats, not %r (%s)" % (cls.__name__, f, type(f).__name__)) if math.isnan(f): raise ValueError("Cannot convert %r to %s." % (f, cls.__name__)) if math.isinf(f): raise OverflowError("Cannot convert %r to %s." % (f, cls.__name__)) return cls(*f.as_integer_ratio()) @classmethod def from_decimal(cls, dec): """Converts a finite Decimal instance to a rational number, exactly.""" from decimal import Decimal if isinstance(dec, numbers.Integral): dec = Decimal(int(dec)) elif not isinstance(dec, Decimal): raise TypeError( "%s.from_decimal() only takes Decimals, not %r (%s)" % (cls.__name__, dec, type(dec).__name__)) if dec.is_infinite(): raise OverflowError( "Cannot convert %s to %s." % (dec, cls.__name__)) if dec.is_nan(): raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__)) sign, digits, exp = dec.as_tuple() digits = int(''.join(map(str, digits))) if sign: digits = -digits if exp >= 0: return cls(digits * 10 ** exp) else: return cls(digits, 10 ** -exp) def limit_denominator(self, max_denominator=1000000): """Closest Fraction to self with denominator at most max_denominator. >>> Fraction('3.141592653589793').limit_denominator(10) Fraction(22, 7) >>> Fraction('3.141592653589793').limit_denominator(100) Fraction(311, 99) >>> Fraction(4321, 8765).limit_denominator(10000) Fraction(4321, 8765) """ # Algorithm notes: For any real number x, define a *best upper # approximation* to x to be a rational number p/q such that: # # (1) p/q >= x, and # (2) if p/q > r/s >= x then s > q, for any rational r/s. # # Define *best lower approximation* similarly. Then it can be # proved that a rational number is a best upper or lower # approximation to x if, and only if, it is a convergent or # semiconvergent of the (unique shortest) continued fraction # associated to x. # # To find a best rational approximation with denominator <= M, # we find the best upper and lower approximations with # denominator <= M and take whichever of these is closer to x. # In the event of a tie, the bound with smaller denominator is # chosen. 
If both denominators are equal (which can happen # only when max_denominator == 1 and self is midway between # two integers) the lower bound---i.e., the floor of self, is # taken. if max_denominator < 1: raise ValueError("max_denominator should be at least 1") if self._denominator <= max_denominator: return Fraction(self) p0, q0, p1, q1 = 0, 1, 1, 0 n, d = self._numerator, self._denominator while True: a = n//d q2 = q0+a*q1 if q2 > max_denominator: break p0, q0, p1, q1 = p1, q1, p0+a*p1, q2 n, d = d, n-a*d k = (max_denominator-q0)//q1 bound1 = Fraction(p0+k*p1, q0+k*q1) bound2 = Fraction(p1, q1) if abs(bound2 - self) <= abs(bound1-self): return bound2 else: return bound1 @property def numerator(a): return a._numerator @property def denominator(a): return a._denominator def __repr__(self): """repr(self)""" return ('Fraction(%s, %s)' % (self._numerator, self._denominator)) def __str__(self): """str(self)""" if self._denominator == 1: return str(self._numerator) else: return '%s/%s' % (self._numerator, self._denominator) def _operator_fallbacks(monomorphic_operator, fallback_operator): """Generates forward and reverse operators given a purely-rational operator and a function from the operator module. Use this like: __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op) In general, we want to implement the arithmetic operations so that mixed-mode operations either call an implementation whose author knew about the types of both arguments, or convert both to the nearest built in type and do the operation there. In Fraction, that means that we define __add__ and __radd__ as: def __add__(self, other): # Both types have numerators/denominator attributes, # so do the operation directly if isinstance(other, (int, Fraction)): return Fraction(self.numerator * other.denominator + other.numerator * self.denominator, self.denominator * other.denominator) # float and complex don't have those operations, but we # know about those types, so special case them. elif isinstance(other, float): return float(self) + other elif isinstance(other, complex): return complex(self) + other # Let the other type take over. return NotImplemented def __radd__(self, other): # radd handles more types than add because there's # nothing left to fall back to. if isinstance(other, numbers.Rational): return Fraction(self.numerator * other.denominator + other.numerator * self.denominator, self.denominator * other.denominator) elif isinstance(other, Real): return float(other) + float(self) elif isinstance(other, Complex): return complex(other) + complex(self) return NotImplemented There are 5 different cases for a mixed-type addition on Fraction. I'll refer to all of the above code that doesn't refer to Fraction, float, or complex as "boilerplate". 'r' will be an instance of Fraction, which is a subtype of Rational (r : Fraction <: Rational), and b : B <: Complex. The first three involve 'r + b': 1. If B <: Fraction, int, float, or complex, we handle that specially, and all is well. 2. If Fraction falls back to the boilerplate code, and it were to return a value from __add__, we'd miss the possibility that B defines a more intelligent __radd__, so the boilerplate should return NotImplemented from __add__. In particular, we don't handle Rational here, even though we could get an exact answer, in case the other type wants to do something special. 3. If B <: Fraction, Python tries B.__radd__ before Fraction.__add__. 
This is ok, because it was implemented with knowledge of Fraction, so it can handle those instances before delegating to Real or Complex. The next two situations describe 'b + r'. We assume that b didn't know about Fraction in its implementation, and that it uses similar boilerplate code: 4. If B <: Rational, then __radd_ converts both to the builtin rational type (hey look, that's us) and proceeds. 5. Otherwise, __radd__ tries to find the nearest common base ABC, and fall back to its builtin type. Since this class doesn't subclass a concrete type, there's no implementation to fall back to, so we need to try as hard as possible to return an actual value, or the user will get a TypeError. """ def forward(a, b): if isinstance(b, (int, Fraction)): return monomorphic_operator(a, b) elif isinstance(b, float): return fallback_operator(float(a), b) elif isinstance(b, complex): return fallback_operator(complex(a), b) else: return NotImplemented forward.__name__ = '__' + fallback_operator.__name__ + '__' forward.__doc__ = monomorphic_operator.__doc__ def reverse(b, a): if isinstance(a, numbers.Rational): # Includes ints. return monomorphic_operator(a, b) elif isinstance(a, numbers.Real): return fallback_operator(float(a), float(b)) elif isinstance(a, numbers.Complex): return fallback_operator(complex(a), complex(b)) else: return NotImplemented reverse.__name__ = '__r' + fallback_operator.__name__ + '__' reverse.__doc__ = monomorphic_operator.__doc__ return forward, reverse def _add(a, b): """a + b""" return Fraction(a.numerator * b.denominator + b.numerator * a.denominator, a.denominator * b.denominator) __add__, __radd__ = _operator_fallbacks(_add, operator.add) def _sub(a, b): """a - b""" return Fraction(a.numerator * b.denominator - b.numerator * a.denominator, a.denominator * b.denominator) __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) def _mul(a, b): """a * b""" return Fraction(a.numerator * b.numerator, a.denominator * b.denominator) __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul) def _div(a, b): """a / b""" return Fraction(a.numerator * b.denominator, a.denominator * b.numerator) __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv) def __floordiv__(a, b): """a // b""" return math.floor(a / b) def __rfloordiv__(b, a): """a // b""" return math.floor(a / b) def __mod__(a, b): """a % b""" div = a // b return a - b * div def __rmod__(b, a): """a % b""" div = a // b return a - b * div def __pow__(a, b): """a ** b If b is not an integer, the result will be a float or complex since roots are generally irrational. If b is an integer, the result will be rational. """ if isinstance(b, numbers.Rational): if b.denominator == 1: power = b.numerator if power >= 0: return Fraction(a._numerator ** power, a._denominator ** power) else: return Fraction(a._denominator ** -power, a._numerator ** -power) else: # A fractional power will generally produce an # irrational number. return float(a) ** float(b) else: return float(a) ** b def __rpow__(b, a): """a ** b""" if b._denominator == 1 and b._numerator >= 0: # If a is an int, keep it that way if possible. 
return a ** b._numerator if isinstance(a, numbers.Rational): return Fraction(a.numerator, a.denominator) ** b if b._denominator == 1: return a ** b._numerator return a ** float(b) def __pos__(a): """+a: Coerces a subclass instance to Fraction""" return Fraction(a._numerator, a._denominator) def __neg__(a): """-a""" return Fraction(-a._numerator, a._denominator) def __abs__(a): """abs(a)""" return Fraction(abs(a._numerator), a._denominator) def __trunc__(a): """trunc(a)""" if a._numerator < 0: return -(-a._numerator // a._denominator) else: return a._numerator // a._denominator def __floor__(a): """Will be math.floor(a) in 3.0.""" return a.numerator // a.denominator def __ceil__(a): """Will be math.ceil(a) in 3.0.""" # The negations cleverly convince floordiv to return the ceiling. return -(-a.numerator // a.denominator) def __round__(self, ndigits=None): """Will be round(self, ndigits) in 3.0. Rounds half toward even. """ if ndigits is None: floor, remainder = divmod(self.numerator, self.denominator) if remainder * 2 < self.denominator: return floor elif remainder * 2 > self.denominator: return floor + 1 # Deal with the half case: elif floor % 2 == 0: return floor else: return floor + 1 shift = 10**abs(ndigits) # See _operator_fallbacks.forward to check that the results of # these operations will always be Fraction and therefore have # round(). if ndigits > 0: return Fraction(round(self * shift), shift) else: return Fraction(round(self / shift) * shift) def __hash__(self): """hash(self)""" # XXX since this method is expensive, consider caching the result # In order to make sure that the hash of a Fraction agrees # with the hash of a numerically equal integer, float or # Decimal instance, we follow the rules for numeric hashes # outlined in the documentation. (See library docs, 'Built-in # Types'). # dinv is the inverse of self._denominator modulo the prime # _PyHASH_MODULUS, or 0 if self._denominator is divisible by # _PyHASH_MODULUS. dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS) if not dinv: hash_ = _PyHASH_INF else: hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS result = hash_ if self >= 0 else -hash_ return -2 if result == -1 else result def __eq__(a, b): """a == b""" if isinstance(b, numbers.Rational): return (a._numerator == b.numerator and a._denominator == b.denominator) if isinstance(b, numbers.Complex) and b.imag == 0: b = b.real if isinstance(b, float): if math.isnan(b) or math.isinf(b): # comparisons with an infinity or nan should behave in # the same way for any finite a, so treat a as zero. return 0.0 == b else: return a == a.from_float(b) else: # Since a doesn't know how to compare with b, let's give b # a chance to compare itself with a. return NotImplemented def _richcmp(self, other, op): """Helper for comparison operators, for internal use only. Implement comparison between a Rational instance `self`, and either another Rational instance or a float `other`. If `other` is not a Rational instance or a float, return NotImplemented. `op` should be one of the six standard comparison operators. """ # convert other to a Rational instance where reasonable. 
if isinstance(other, numbers.Rational): return op(self._numerator * other.denominator, self._denominator * other.numerator) if isinstance(other, float): if math.isnan(other) or math.isinf(other): return op(0.0, other) else: return op(self, self.from_float(other)) else: return NotImplemented def __lt__(a, b): """a < b""" return a._richcmp(b, operator.lt) def __gt__(a, b): """a > b""" return a._richcmp(b, operator.gt) def __le__(a, b): """a <= b""" return a._richcmp(b, operator.le) def __ge__(a, b): """a >= b""" return a._richcmp(b, operator.ge) def __bool__(a): """a != 0""" return a._numerator != 0 # support for pickling, copy, and deepcopy def __reduce__(self): return (self.__class__, (str(self),)) def __copy__(self): if type(self) == Fraction: return self # I'm immutable; therefore I am my own clone return self.__class__(self._numerator, self._denominator) def __deepcopy__(self, memo): if type(self) == Fraction: return self # My components are also immutable return self.__class__(self._numerator, self._denominator)
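# Usage sketch (not from the original module): a few hedged examples of the
# Fraction behaviour implemented above -- exact construction from strings,
# floats and Decimals, limit_denominator(), and the mixed-type arithmetic
# wired up by _operator_fallbacks().
from fractions import Fraction
from decimal import Decimal

assert Fraction('3.1415') == Fraction(6283, 2000)     # numeric-string parsing
assert Fraction(Decimal('1.1')) == Fraction(11, 10)   # Decimals convert exactly
assert Fraction(1.1) != Fraction(11, 10)              # binary floats are inexact
assert Fraction(1, 3) + 1 == Fraction(4, 3)           # int takes the Rational path
assert isinstance(Fraction(1, 3) + 0.5, float)        # float falls back to float(a) + b
assert Fraction('3.141592653589793').limit_denominator(100) == Fraction(311, 99)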
gpl-3.0
eonum/medword
model_validation.py
1
11972
import numpy as np import preprocess as pp import os from random import randint from sklearn.decomposition import PCA import matplotlib.pyplot as plt import csv def validate_model(embedding, emb_model_dir, emb_model_fn): print("Start validation. Loading model. \n") # load config config = embedding.config # load model embedding.load_model(emb_model_dir, emb_model_fn) # directories and filenames val_dir = config.config['val_data_dir'] doesntfit_fn = config.config['doesntfit_file'] doesntfit_src = os.path.join(val_dir, doesntfit_fn) synonyms_fn = config.config['synonyms_file'] syn_file_src = os.path.join(val_dir, synonyms_fn) # test with doesn't fit questions test_doesntfit(embedding, doesntfit_src) # test with synonyms # TODO get better syn file (slow, contains many non-significant instances) # test_synonyms(embedding, syn_file_src) # test with human similarity TODO remove hardcoding human_sim_file_src = 'data/validation_data/human_similarity.csv' test_human_similarity(embedding, human_sim_file_src) #### Doesn't Fit Validation #### def doesntfit(embedding, word_list): """ - compares each word-vector to mean of all word-vectors of word_list using the vector dot-product - vector with lowest dot-produt to mean-vector is regarded as the one that dosen't fit """ used_words = [word for word in word_list if embedding.may_construct_word_vec(word)] n_used_words = len(used_words) n_words = len(word_list) if n_used_words != n_words: ignored_words = set(word_list) - set(used_words) print("vectors for words %s are not present in the model, ignoring these words: ", ignored_words) if not used_words: print("cannot select a word from an empty list.") vectors = np.vstack(embedding.word_vec(word) for word in used_words) mean = np.mean(vectors, axis=0) dists = np.dot(vectors, mean) return sorted(zip(dists, used_words))[0][1] def test_doesntfit(embedding, file_src): """ - tests all doesntfit-questions (lines) of file - a doesnt-fit question is of the format "word_1 word_2 ... word_N word_NotFitting" where word_1 to word_n are members of a category but word_NotFitting isn't eg. "Auto Motorrad Fahrrad Ampel" """ # load config config = embedding.config print("Validating 'doesntfit' with file", file_src) num_lines = sum(1 for line in open(file_src)) num_questions = 0 num_right = 0 tokenizer = pp.get_tokenizer(config) # get questions with open(file_src) as f: questions = f.read().splitlines() tk_questions = [tokenizer.tokenize(q) for q in questions] # TODO: check if tokenizer has splitted one word to mulitple words and handle it. 
# So far no word in the doesnt_fit testfile should be splitted # vocab used to speed checking if word is in vocabulary # (also checked by embedding.may_construct_word_vec(word)) vocab = embedding.get_vocab() # test each question for question in tk_questions: # check if all words exist in vocabulary if all(((word in vocab) or (embedding.may_construct_word_vec(word))) for word in question): num_questions += 1 if doesntfit(embedding, question) == question[-1]: num_right += 1 # calculate result correct_matches = np.round(num_right/np.float(num_questions)*100, 1) if num_questions>0 else 0.0 coverage = np.round(num_questions/np.float(num_lines)*100, 1) if num_lines>0 else 0.0 # log result print("\n*** Doesn't fit ***") print('Doesn\'t fit correct: {0}% ({1}/{2})'.format(str(correct_matches), str(num_right), str(num_questions))) print('Doesn\'t fit coverage: {0}% ({1}/{2}) \n'.format(str(coverage), str(num_questions), str(num_lines))) #### Synonyms Validation #### def test_synonyms(embedding, file_src): """ - tests all synonym-questions (lines) of file - a synonym-question is of the format "word_1 word_2" where word_1 and word_2 are synonyms eg. "Blutgerinnsel Thrombus" - for word_1 check if it appears in the n closest words of word_2 using "model.cosine(word, n)" and vice-versa - for each synonym-pair TWO CHECKS are made therefore (non-symmetric problem) """ print("Validating 'synonyms' with file", file_src) config = embedding.config num_lines = sum(1 for line in open(file_src)) num_questions = 0 cos_sim_sum_synonyms = 0 tokenizer = pp.get_tokenizer(config) # get questions which are still of lenght 2 after tokenization # TODO: improve for compound words (aaa-bbb) which are splitted by the tokenizer tk_questions = [] with open(file_src, 'r') as f: questions = f.read().splitlines() for q in questions: # synonyms = q.split(';')#tokenizer.tokenize(q) # synonyms = [" ".join(tokenizer.tokenize(synonym)) for synonym in # synonyms] synonyms = tokenizer.tokenize(q) if len(synonyms) == 2: tk_questions.append(synonyms) vocab = embedding.get_vocab() # test each question for tk_quest in tk_questions: # check if all words exist in vocabulary if all(((word in vocab) or embedding.may_construct_word_vec(word)) for word in tk_quest): num_questions += 1 w1 = tk_quest[0] w2 = tk_quest[1] cos_sim_sum_synonyms += embedding.similarity(w1, w2) # compute avg cosine similarity for random vectors to relate to avg_cosine_similarity of synonyms vocab_size = len(vocab) n_vals = 1000 similarity_sum_rand_vec = 0 vals1 = [randint(0, vocab_size -1) for i in range(n_vals)] vals2 = [randint(0, vocab_size -1) for i in range(n_vals)] for v1, v2 in zip(vals1, vals2): similarity_sum_rand_vec += embedding.similarity(vocab[v1], vocab[v2]) avg_cosine_similarity_rand_vec = similarity_sum_rand_vec / np.float(n_vals) # calculate result avg_cosine_similarity_synonyms = (cos_sim_sum_synonyms / num_questions) if num_questions>0 else 0.0 coverage = np.round(num_questions/np.float(num_lines)*100, 1) if num_lines>0 else 0.0 # log result print("\n*** Cosine-Similarity ***") print("Synonyms avg-cos-similarity (SACS):", avg_cosine_similarity_synonyms, "\nRandom avg-cos-similarity (RACS):", avg_cosine_similarity_rand_vec, "\nRatio SACS/RACS:", avg_cosine_similarity_synonyms/float(avg_cosine_similarity_rand_vec)) print("\n*** Word Coverage ***") print("Synonyms: {0} pairs in input. {1} pairs after tokenization. 
{2} pairs could be constructed from model-vocabulary.".format(str(num_lines), str(len(tk_questions)), str(num_questions))) print("Synonyms coverage: {0}% ({1}/{2})\n".format(str(coverage), str(2*num_questions), str(2*num_lines), )) def get_human_rating_deviation(embedding, word1, word2, human_similarity): # compute deviation of human similarity from cosine similarity # cosine similarity cosine_similarity = embedding.similarity(word1, word2) return np.abs(cosine_similarity - human_similarity) def test_human_similarity(embedding, file_src): """ Compare cosine similarity of 2 word-vectors against a similarity value based on human ratings. Each line in the file contains two words and the similarity value, separated by ':'. The datasets were obtained by asking human subjects to assign a similarity or relatedness judgment to a number of German word pairs. https://www.ukp.tu-darmstadt.de/data/semantic-relatedness/german-relatedness-datasets/ """ config = embedding.config tokenizer = pp.get_tokenizer(config) vocab = embedding.get_vocab() vocab_size = len(vocab) # accumulate error and count test instances summed_error = 0.0 n_test_instances = 0 n_skipped_instances = 0 summed_random_error = 0.0 # load file to lines with open(file_src, 'r') as csvfile: filereader = csv.reader(csvfile, delimiter=':',) next(filereader) # process line by line for line in filereader: n_test_instances += 1 # split lines to instances word1 = tokenizer.tokenize(line[0])[0] word2 = tokenizer.tokenize(line[1])[0] human_similarity = np.float32(line[2]) # check if both words are in vocab if (word1 in embedding.get_vocab() and word2 in embedding.get_vocab()): # add current deviation to error deviation = get_human_rating_deviation(embedding, word1, word2, human_similarity) summed_error += deviation # get a random error for comparison rand_word1 = vocab[randint(0, vocab_size -1)] rand_word2 = vocab[randint(0, vocab_size -1)] random_dev = get_human_rating_deviation(embedding, rand_word1, rand_word2, human_similarity) summed_random_error += random_dev else: n_skipped_instances += 1 # print results print("\n*** Human-Similarity ***") print("Number of instances: {0}, skipped: {1}" .format(str(n_test_instances), str(n_skipped_instances))) # check whether we found any valid test instance n_processed_instances = n_test_instances - n_skipped_instances if (n_processed_instances == 0): print("Error: No instance could be computed with this model.") else: mean_error = summed_error / n_processed_instances random_error = summed_random_error / n_processed_instances print("random error: {0}, mean error: {1}" .format(str(random_error), str(mean_error))) #### Visualization #### def visualize_words(embedding, word_list, n_nearest_neighbours): # get indexes and words that you want to visualize words_to_visualize = [] # word_indexes_to_visualize = [] # get all words and neighbors that you want to visualize for word in word_list: if not embedding.may_construct_word_vec(word): continue words_to_visualize.append(word) # word_indexes_to_visualize.append(model.ix(word)) # get neighbours of word neighbours = [n for (n, m) in embedding.most_similar_n(word, n_nearest_neighbours)] words_to_visualize.extend(neighbours) #word_indexes_to_visualize.extend(indexes) # get vectors from indexes to visualize if words_to_visualize == []: print("No word found to show.") return emb_vectors = np.vstack([embedding.word_vec(word) for word in words_to_visualize]) # project down to 2D pca = PCA(n_components=2) emb_vec_2D = pca.fit_transform(emb_vectors) n_inputs = 
len(word_list) for i in range(n_inputs): # group word and it's neighbours together (results in different color in plot) lower = i*n_nearest_neighbours + i upper = (i+1)*n_nearest_neighbours + (i+1) # plot 2D plt.scatter(emb_vec_2D[lower:upper, 0], emb_vec_2D[lower:upper, 1]) for label, x, y in zip(words_to_visualize, emb_vec_2D[:, 0], emb_vec_2D[:, 1]): plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points') # find nice axes for plot lower_x = min(emb_vec_2D[:, 0]) upper_x = max(emb_vec_2D[:, 0]) lower_y = min(emb_vec_2D[:, 1]) upper_y = max(emb_vec_2D[:, 1]) # 10% of padding on all sides pad_x = 0.1 * abs(upper_x - lower_x) pad_y = 0.1 * abs(upper_y - lower_y) plt.xlim([lower_x - pad_x, upper_x + pad_x]) plt.ylim([lower_y - pad_y, upper_y + pad_y]) plt.show()
mit
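The validation script above picks the odd word out of a list by comparing each word vector against the mean of the group with a dot product. Below is a minimal, self-contained sketch of that selection step, assuming the vectors are available as a plain dict of numpy arrays (toy values invented for illustration; the real script pulls vectors from its `embedding` object and filters out-of-vocabulary words first).

```python
# Minimal sketch of the "doesn't fit" selection, on hypothetical toy vectors.
import numpy as np

def doesnt_fit(word_vectors, word_list):
    """Return the word whose vector has the lowest dot product with the mean."""
    vectors = np.vstack([word_vectors[w] for w in word_list])
    mean = vectors.mean(axis=0)
    dists = vectors @ mean                      # one dot product per word
    return word_list[int(np.argmin(dists))]     # lowest similarity = odd one out

word_vectors = {
    "Auto":     np.array([0.9, 0.1, 0.0]),
    "Motorrad": np.array([0.8, 0.2, 0.1]),
    "Fahrrad":  np.array([0.7, 0.3, 0.0]),
    "Ampel":    np.array([0.1, 0.1, 0.9]),
}
print(doesnt_fit(word_vectors, ["Auto", "Motorrad", "Fahrrad", "Ampel"]))  # Ampel
```

Passing a list rather than a generator to np.vstack also avoids the deprecated generator form used in the original doesntfit().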
bimbam23/tools-iuc
datatypes/snpsift_dbnsfp_datatypes/snpsift_dbnsfp.py
9
3924
""" SnpSift dbNSFP datatypes """ import gzip import logging import os import os.path import sys import traceback from galaxy.datatypes.data import Text from galaxy.datatypes.metadata import MetadataElement log = logging.getLogger(__name__) class SnpSiftDbNSFP( Text ): """Class describing a dbNSFP database prepared fpr use by SnpSift dbnsfp """ MetadataElement( name='reference_name', default='dbSNFP', desc='Reference Name', readonly=True, visible=True, set_in_upload=True, no_value='dbSNFP' ) MetadataElement( name="bgzip", default=None, desc="dbNSFP bgzip", readonly=True, visible=True, no_value=None ) MetadataElement( name="index", default=None, desc="Tabix Index File", readonly=True, visible=True, no_value=None) MetadataElement( name="annotation", default=[], desc="Annotation Names", readonly=True, visible=True, no_value=[] ) file_ext = "snpsiftdbnsfp" composite_type = 'auto_primary_file' allow_datatype_change = False """ ## The dbNSFP file is a tabular file with 1 header line ## The first 4 columns are required to be: chrom pos ref alt ## These match columns 1,2,4,5 of the VCF file ## SnpSift requires the file to be block-gzipped and the indexed with samtools tabix ## Example: ## Compress using block-gzip algorithm bgzip dbNSFP2.3.txt ## Create tabix index tabix -s 1 -b 2 -e 2 dbNSFP2.3.txt.gz """ def __init__( self, **kwd ): Text.__init__( self, **kwd ) self.add_composite_file('%s.grp', description='Group File', substitute_name_with_metadata='reference_name', is_binary=False) self.add_composite_file('%s.ti', description='', substitute_name_with_metadata='reference_name', is_binary=False) def init_meta( self, dataset, copy_from=None ): Text.init_meta( self, dataset, copy_from=copy_from ) def generate_primary_file(self, dataset=None): """ This is called only at upload to write the html file cannot rename the datasets here - they come with the default unfortunately """ self.regenerate_primary_file(dataset) def regenerate_primary_file(self, dataset): """ cannot do this until we are setting metadata """ annotations = "dbNSFP Annotations: %s\n" % ','.join(dataset.metadata.annotation) f = open(dataset.file_name, 'a') if dataset.metadata.bgzip: bn = dataset.metadata.bgzip f.write(bn) f.write('\n') f.write(annotations) f.close() def set_meta( self, dataset, overwrite=True, **kwd ): try: efp = dataset.extra_files_path if os.path.exists(efp): flist = os.listdir(efp) for i, fname in enumerate(flist): if fname.endswith('.gz'): dataset.metadata.bgzip = fname try: fh = gzip.open(os.path.join(efp, fname), 'r') buf = fh.read(5000) lines = buf.splitlines() headers = lines[0].split('\t') dataset.metadata.annotation = headers[4:] except Exception as e: log.warn("set_meta fname: %s %s" % (fname, str(e))) traceback.print_stack(file=sys.stderr) finally: fh.close() if fname.endswith('.tbi'): dataset.metadata.index = fname self.regenerate_primary_file(dataset) except Exception as e: log.warn("set_meta fname: %s %s" % (dataset.file_name if dataset and dataset.file_name else 'Unkwown', str(e))) traceback.print_stack(file=sys.stderr) if __name__ == '__main__': import doctest doctest.testmod(sys.modules[__name__])
mit
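The set_meta() method of the datatype above reads the header line of the block-gzipped dbNSFP table to discover the annotation column names. A rough sketch of that step, assuming a bgzipped tab-separated file whose first four columns are chrom, pos, ref and alt (the file name is hypothetical):

```python
# Read the header of a bgzipped dbNSFP table and report annotation columns.
import gzip

def read_dbnsfp_annotations(path):
    with gzip.open(path, "rt") as fh:           # bgzip output is valid gzip
        header = fh.readline().rstrip("\n").split("\t")
    return header[4:]                           # skip chrom, pos, ref, alt

# annotations = read_dbnsfp_annotations("dbNSFP2.3.txt.gz")  # hypothetical file
# print(annotations)
```

Because bgzip output is readable with the standard gzip module, the header can be inspected directly; the accompanying tabix index is only needed for random access during annotation.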
dannyboi104/SickRage
autoProcessTV/lib/requests/packages/chardet/universaldetector.py
1776
6840
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from . import constants import sys import codecs from .latin1prober import Latin1Prober # windows-1252 from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets from .sbcsgroupprober import SBCSGroupProber # single-byte character sets from .escprober import EscCharSetProber # ISO-2122, etc. import re MINIMUM_THRESHOLD = 0.20 ePureAscii = 0 eEscAscii = 1 eHighbyte = 2 class UniversalDetector: def __init__(self): self._highBitDetector = re.compile(b'[\x80-\xFF]') self._escDetector = re.compile(b'(\033|~{)') self._mEscCharSetProber = None self._mCharSetProbers = [] self.reset() def reset(self): self.result = {'encoding': None, 'confidence': 0.0} self.done = False self._mStart = True self._mGotData = False self._mInputState = ePureAscii self._mLastChar = b'' if self._mEscCharSetProber: self._mEscCharSetProber.reset() for prober in self._mCharSetProbers: prober.reset() def feed(self, aBuf): if self.done: return aLen = len(aBuf) if not aLen: return if not self._mGotData: # If the data starts with BOM, we know it is UTF if aBuf[:3] == codecs.BOM_UTF8: # EF BB BF UTF-8 with BOM self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0} elif aBuf[:4] == codecs.BOM_UTF32_LE: # FF FE 00 00 UTF-32, little-endian BOM self.result = {'encoding': "UTF-32LE", 'confidence': 1.0} elif aBuf[:4] == codecs.BOM_UTF32_BE: # 00 00 FE FF UTF-32, big-endian BOM self.result = {'encoding': "UTF-32BE", 'confidence': 1.0} elif aBuf[:4] == b'\xFE\xFF\x00\x00': # FE FF 00 00 UCS-4, unusual octet order BOM (3412) self.result = { 'encoding': "X-ISO-10646-UCS-4-3412", 'confidence': 1.0 } elif aBuf[:4] == b'\x00\x00\xFF\xFE': # 00 00 FF FE UCS-4, unusual octet order BOM (2143) self.result = { 'encoding': "X-ISO-10646-UCS-4-2143", 'confidence': 1.0 } elif aBuf[:2] == codecs.BOM_LE: # FF FE UTF-16, little endian BOM self.result = {'encoding': "UTF-16LE", 'confidence': 1.0} elif aBuf[:2] == codecs.BOM_BE: # FE FF UTF-16, big endian BOM self.result = {'encoding': "UTF-16BE", 'confidence': 1.0} self._mGotData = True if self.result['encoding'] and (self.result['confidence'] > 0.0): self.done = True return if self._mInputState == ePureAscii: if self._highBitDetector.search(aBuf): self._mInputState = eHighbyte elif ((self._mInputState == ePureAscii) and self._escDetector.search(self._mLastChar + aBuf)): self._mInputState = 
eEscAscii self._mLastChar = aBuf[-1:] if self._mInputState == eEscAscii: if not self._mEscCharSetProber: self._mEscCharSetProber = EscCharSetProber() if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt: self.result = {'encoding': self._mEscCharSetProber.get_charset_name(), 'confidence': self._mEscCharSetProber.get_confidence()} self.done = True elif self._mInputState == eHighbyte: if not self._mCharSetProbers: self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), Latin1Prober()] for prober in self._mCharSetProbers: if prober.feed(aBuf) == constants.eFoundIt: self.result = {'encoding': prober.get_charset_name(), 'confidence': prober.get_confidence()} self.done = True break def close(self): if self.done: return if not self._mGotData: if constants._debug: sys.stderr.write('no data received!\n') return self.done = True if self._mInputState == ePureAscii: self.result = {'encoding': 'ascii', 'confidence': 1.0} return self.result if self._mInputState == eHighbyte: proberConfidence = None maxProberConfidence = 0.0 maxProber = None for prober in self._mCharSetProbers: if not prober: continue proberConfidence = prober.get_confidence() if proberConfidence > maxProberConfidence: maxProberConfidence = proberConfidence maxProber = prober if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD): self.result = {'encoding': maxProber.get_charset_name(), 'confidence': maxProber.get_confidence()} return self.result if constants._debug: sys.stderr.write('no probers hit minimum threshhold\n') for prober in self._mCharSetProbers[0].mProbers: if not prober: continue sys.stderr.write('%s confidence = %s\n' % (prober.get_charset_name(), prober.get_confidence()))
gpl-3.0
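For reference, this is how the detector above is typically driven when used through the standalone chardet package (a sketch; the file name is hypothetical): feed() accepts successive byte chunks, close() finalizes the guess, and result holds the detected encoding and confidence.

```python
# Incremental charset detection over a file of unknown encoding (hypothetical path).
from chardet.universaldetector import UniversalDetector

detector = UniversalDetector()
with open("unknown_encoding.txt", "rb") as fh:
    for chunk in iter(lambda: fh.read(4096), b""):
        detector.feed(chunk)
        if detector.done:          # stop early once the detector is confident
            break
detector.close()
print(detector.result)             # e.g. {'encoding': 'UTF-8-SIG', 'confidence': 1.0}
```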
shail2810/nova
nova/tests/functional/api_sample_tests/test_quota_sets.py
2
4070
# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class QuotaSetsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3): ADMIN_API = True extension_name = "os-quota-sets" _api_version = 'v2' def _get_flags(self): f = super(QuotaSetsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.server_group_quotas.' 'Server_group_quotas') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.quotas.Quotas') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.extended_quotas.Extended_quotas') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.user_quotas.User_quotas') return f def test_show_quotas(self): # Get api sample to show quotas. response = self._do_get('os-quota-sets/fake_tenant') self._verify_response('quotas-show-get-resp', {}, response, 200) def test_show_quotas_defaults(self): # Get api sample to show quotas defaults. response = self._do_get('os-quota-sets/fake_tenant/defaults') self._verify_response('quotas-show-defaults-get-resp', {}, response, 200) def test_update_quotas(self): # Get api sample to update quotas. response = self._do_put('os-quota-sets/fake_tenant', 'quotas-update-post-req', {}) self._verify_response('quotas-update-post-resp', {}, response, 200) def test_delete_quotas(self): # Get api sample to delete quota. response = self._do_delete('os-quota-sets/fake_tenant') self.assertEqual(response.status_code, 202) self.assertEqual(response.content, '') def test_update_quotas_force(self): # Get api sample to update quotas. response = self._do_put('os-quota-sets/fake_tenant', 'quotas-update-force-post-req', {}) return self._verify_response('quotas-update-force-post-resp', {}, response, 200) def test_show_quotas_for_user(self): # Get api sample to show quotas for user. response = self._do_get('os-quota-sets/fake_tenant?user_id=1') self._verify_response('user-quotas-show-get-resp', {}, response, 200) def test_delete_quotas_for_user(self): response = self._do_delete('os-quota-sets/fake_tenant?user_id=1') self.assertEqual(response.status_code, 202) self.assertEqual(response.content, '') def test_update_quotas_for_user(self): # Get api sample to update quotas for user. response = self._do_put('os-quota-sets/fake_tenant?user_id=1', 'user-quotas-update-post-req', {}) return self._verify_response('user-quotas-update-post-resp', {}, response, 200)
apache-2.0
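The tests above exercise the os-quota-sets endpoints through the api_sample harness. As a rough illustration only, the same calls made directly with the requests library might look like the sketch below; the base URL, token, and quota values are assumptions and not part of the test suite.

```python
# Hypothetical direct calls to the quota-set endpoints exercised by the tests.
import requests

BASE = "http://controller:8774/v2/os-quota-sets"   # hypothetical Nova endpoint
HEADERS = {"X-Auth-Token": "ADMIN_TOKEN"}           # hypothetical admin token

# Show a tenant's quotas, the defaults, and a per-user view
requests.get(BASE + "/fake_tenant", headers=HEADERS)
requests.get(BASE + "/fake_tenant/defaults", headers=HEADERS)
requests.get(BASE + "/fake_tenant", params={"user_id": "1"}, headers=HEADERS)

# Update and then delete the tenant's quotas (body shape assumed from the samples)
body = {"quota_set": {"instances": 20, "cores": 50}}
requests.put(BASE + "/fake_tenant", json=body, headers=HEADERS)
requests.delete(BASE + "/fake_tenant", headers=HEADERS)
```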
hoehnp/navit_test
lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py
353
14161
"""Module for supporting the lxml.etree library. The idea here is to use as much of the native library as possible, without using fragile hacks like custom element names that break between releases. The downside of this is that we cannot represent all possible trees; specifically the following are known to cause problems: Text or comments as siblings of the root element Docypes with no name When any of these things occur, we emit a DataLossWarning """ from __future__ import absolute_import, division, unicode_literals # pylint:disable=protected-access import warnings import re import sys from . import base from ..constants import DataLossWarning from .. import constants from . import etree as etree_builders from .. import _ihatexml import lxml.etree as etree fullTree = True tag_regexp = re.compile("{([^}]*)}(.*)") comment_type = etree.Comment("asd").tag class DocumentType(object): def __init__(self, name, publicId, systemId): self.name = name self.publicId = publicId self.systemId = systemId class Document(object): def __init__(self): self._elementTree = None self._childNodes = [] def appendChild(self, element): self._elementTree.getroot().addnext(element._element) def _getChildNodes(self): return self._childNodes childNodes = property(_getChildNodes) def testSerializer(element): rv = [] infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) def serializeElement(element, indent=0): if not hasattr(element, "tag"): if hasattr(element, "getroot"): # Full tree case rv.append("#document") if element.docinfo.internalDTD: if not (element.docinfo.public_id or element.docinfo.system_url): dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name else: dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( element.docinfo.root_name, element.docinfo.public_id, element.docinfo.system_url) rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) next_element = element.getroot() while next_element.getprevious() is not None: next_element = next_element.getprevious() while next_element is not None: serializeElement(next_element, indent + 2) next_element = next_element.getnext() elif isinstance(element, str) or isinstance(element, bytes): # Text in a fragment assert isinstance(element, str) or sys.version_info[0] == 2 rv.append("|%s\"%s\"" % (' ' * indent, element)) else: # Fragment case rv.append("#document-fragment") for next_element in element: serializeElement(next_element, indent + 2) elif element.tag == comment_type: rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) if hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) else: assert isinstance(element, etree._Element) nsmatch = etree_builders.tag_regexp.match(element.tag) if nsmatch is not None: ns = nsmatch.group(1) tag = nsmatch.group(2) prefix = constants.prefixes[ns] rv.append("|%s<%s %s>" % (' ' * indent, prefix, infosetFilter.fromXmlName(tag))) else: rv.append("|%s<%s>" % (' ' * indent, infosetFilter.fromXmlName(element.tag))) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() name = infosetFilter.fromXmlName(name) prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = infosetFilter.fromXmlName(name) attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in 
element: serializeElement(child, indent) if hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] def serializeElement(element): if not hasattr(element, "tag"): if element.docinfo.internalDTD: if element.docinfo.doctype: dtd_str = element.docinfo.doctype else: dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name rv.append(dtd_str) serializeElement(element.getroot()) elif element.tag == comment_type: rv.append("<!--%s-->" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (element.tag,)) else: attr = " ".join(["%s=\"%s\"" % (name, value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("</%s>" % (element.tag,)) if hasattr(element, "tail") and element.tail: rv.append(element.tail) serializeElement(element) return "".join(rv) class TreeBuilder(base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = None commentClass = None fragmentClass = Document implementation = etree def __init__(self, namespaceHTMLElements, fullTree=False): builder = etree_builders.getETreeModule(etree, fullTree=fullTree) infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) self.namespaceHTMLElements = namespaceHTMLElements class Attributes(dict): def __init__(self, element, value=None): if value is None: value = {} self._element = element dict.__init__(self, value) # pylint:disable=non-parent-init-called for key, value in self.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value def __setitem__(self, key, value): dict.__setitem__(self, key, value) if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value class Element(builder.Element): def __init__(self, name, namespace): name = infosetFilter.coerceElement(name) builder.Element.__init__(self, name, namespace=namespace) self._attributes = Attributes(self) def _setName(self, name): self._name = infosetFilter.coerceElement(name) self._element.tag = self._getETreeTag( self._name, self._namespace) def _getName(self): return infosetFilter.fromXmlName(self._name) name = property(_getName, _setName) def _getAttributes(self): return self._attributes def _setAttributes(self, attributes): self._attributes = Attributes(self, attributes) attributes = property(_getAttributes, _setAttributes) def insertText(self, data, insertBefore=None): data = infosetFilter.coerceCharacters(data) builder.Element.insertText(self, data, insertBefore) def appendChild(self, child): builder.Element.appendChild(self, child) class Comment(builder.Comment): def __init__(self, data): data = infosetFilter.coerceComment(data) builder.Comment.__init__(self, data) def _setData(self, data): data = infosetFilter.coerceComment(data) self._element.text = data def _getData(self): return self._element.text data = property(_getData, _setData) self.elementClass = Element self.commentClass = Comment # self.fragmentClass = builder.DocumentFragment base.TreeBuilder.__init__(self, namespaceHTMLElements) def 
reset(self): base.TreeBuilder.reset(self) self.insertComment = self.insertCommentInitial self.initial_comments = [] self.doctype = None def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._elementTree else: return self.document._elementTree.getroot() def getFragment(self): fragment = [] element = self.openElements[0]._element if element.text: fragment.append(element.text) fragment.extend(list(element)) if element.tail: fragment.append(element.tail) return fragment def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] if not name: warnings.warn("lxml cannot represent empty doctype", DataLossWarning) self.doctype = None else: coercedName = self.infosetFilter.coerceElement(name) if coercedName != name: warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) doctype = self.doctypeClass(coercedName, publicId, systemId) self.doctype = doctype def insertCommentInitial(self, data, parent=None): assert parent is None or parent is self.document assert self.document._elementTree is None self.initial_comments.append(data) def insertCommentMain(self, data, parent=None): if (parent == self.document and self.document._elementTree.getroot()[-1].tag == comment_type): warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) super(TreeBuilder, self).insertComment(data, parent) def insertRoot(self, token): """Create the document root""" # Because of the way libxml2 works, it doesn't seem to be possible to # alter information like the doctype after the tree has been parsed. # Therefore we need to use the built-in parser to create our initial # tree, after which we can add elements like normal docStr = "" if self.doctype: assert self.doctype.name docStr += "<!DOCTYPE %s" % self.doctype.name if (self.doctype.publicId is not None or self.doctype.systemId is not None): docStr += (' PUBLIC "%s" ' % (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) if self.doctype.systemId: sysid = self.doctype.systemId if sysid.find("'") >= 0 and sysid.find('"') >= 0: warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) sysid = sysid.replace("'", 'U00027') if sysid.find("'") >= 0: docStr += '"%s"' % sysid else: docStr += "'%s'" % sysid else: docStr += "''" docStr += ">" if self.doctype.name != token["name"]: warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" root = etree.fromstring(docStr) # Append the initial comments: for comment_token in self.initial_comments: comment = self.commentClass(comment_token["data"]) root.addprevious(comment._element) # Create the root document and add the ElementTree to it self.document = self.documentClass() self.document._elementTree = root.getroottree() # Give the root element the right name name = token["name"] namespace = token.get("namespace", self.defaultNamespace) if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) root.tag = etree_tag # Add the root element to the internal child/open data structures root_element = self.elementClass(name, namespace) root_element._element = root self.document._childNodes.append(root_element) self.openElements.append(root_element) # Reset to the default insert comment function self.insertComment = self.insertCommentMain
gpl-2.0
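One detail worth noting in the tree builder above is the Clark-notation tag handling: lxml and ElementTree spell a namespaced tag as "{namespace}localname", and tag_regexp splits it back into its parts. A small self-contained sketch of that idea:

```python
# Split a Clark-notation tag into (namespace, local name).
import re

tag_regexp = re.compile(r"{([^}]*)}(.*)")

def split_tag(tag):
    match = tag_regexp.match(tag)
    if match is None:
        return None, tag            # no namespace present
    return match.group(1), match.group(2)

print(split_tag("{http://www.w3.org/1999/xhtml}div"))  # ('http://www.w3.org/1999/xhtml', 'div')
print(split_tag("div"))                                 # (None, 'div')
```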
mihaic/brainiak
brainiak/utils/utils.py
1
36227
# Copyright 2016 Intel Corporation, Princeton University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import re import warnings import os.path import psutil from .fmrisim import generate_stimfunction, _double_gamma_hrf, convolve_hrf from scipy.fftpack import fft, ifft import logging logger = logging.getLogger(__name__) """ Some utility functions that can be used by different algorithms """ __all__ = [ "array_correlation", "center_mass_exp", "concatenate_not_none", "cov2corr", "from_tri_2_sym", "from_sym_2_tri", "gen_design", "phase_randomize", "p_from_null", "ReadDesign", "sumexp_stable", "usable_cpu_count", ] def circ_dist(x, y): """ Computes the pairwise circular distance between two arrays of points (in radians). Parameters ---------- x: numpy vector of positions on a circle, in radians. y: numpy vector of positions on a circle, in radians. Returns ------- r: numpy vector of distances between inputs. """ if x.size != y.size: raise ValueError("Input sizes must match to compute pairwise " "comparisons.") r = np.angle(np.exp(x*1j) / np.exp(y*1j)) return r def from_tri_2_sym(tri, dim): """convert a upper triangular matrix in 1D format to 2D symmetric matrix Parameters ---------- tri: 1D array Contains elements of upper triangular matrix dim : int The dimension of target matrix. Returns ------- symm : 2D array Symmetric matrix in shape=[dim, dim] """ symm = np.zeros((dim, dim)) symm[np.triu_indices(dim)] = tri return symm def from_sym_2_tri(symm): """convert a 2D symmetric matrix to an upper triangular matrix in 1D format Parameters ---------- symm : 2D array Symmetric matrix Returns ------- tri: 1D array Contains elements of upper triangular matrix """ inds = np.triu_indices_from(symm) tri = symm[inds] return tri def sumexp_stable(data): """Compute the sum of exponents for a list of samples Parameters ---------- data : array, shape=[features, samples] A data array containing samples. Returns ------- result_sum : array, shape=[samples,] The sum of exponents for each sample divided by the exponent of the maximum feature value in the sample. max_value : array, shape=[samples,] The maximum feature value for each sample. result_exp : array, shape=[features, samples] The exponent of each element in each sample divided by the exponent of the maximum feature value in the sample. Note ---- This function is more stable than computing the sum(exp(v)). It useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function. """ max_value = data.max(axis=0) result_exp = np.exp(data - max_value) result_sum = np.sum(result_exp, axis=0) return result_sum, max_value, result_exp def concatenate_not_none(data, axis=0): """Construct a numpy array by stacking not-None arrays in a list Parameters ---------- data : list of arrays The list of arrays to be concatenated. Arrays have same shape in all but one dimension or are None, in which case they are ignored. axis : int, default = 0 Axis for the concatenation Returns ------- data_stacked : array The resulting concatenated array. 
""" # Get the indexes of the arrays in the list mask = [] for i in range(len(data)): if data[i] is not None: mask.append(i) # Concatenate them stacked = np.concatenate([data[i] for i in mask], axis=axis) return stacked def cov2corr(cov): """Calculate the correlation matrix based on a covariance matrix Parameters ---------- cov: 2D array Returns ------- corr: 2D array correlation converted from the covarince matrix """ assert cov.ndim == 2, 'covariance matrix should be 2D array' inv_sd = 1 / np.sqrt(np.diag(cov)) corr = cov * inv_sd[None, :] * inv_sd[:, None] return corr class ReadDesign: """A class which has the ability of reading in design matrix in .1D file, generated by AFNI's 3dDeconvolve. Parameters ---------- fname: string, the address of the file to read. include_orth: Boollean, whether to include "orthogonal" regressors in the nuisance regressors which are usually head motion parameters. All the columns of design matrix are still going to be read in, but the attribute cols_used will reflect whether these orthogonal regressors are to be included for furhter analysis. Note that these are not entered into design_task attribute which include only regressors related to task conditions. include_pols: Boollean, whether to include polynomial regressors in the nuisance regressors which are used to capture slow drift of signals. Attributes ---------- design: 2d array. The design matrix read in from the csv file. design_task: 2d array. The part of design matrix corresponding to task conditions. n_col: number of total columns in the design matrix. column_types: 1d array. the types of each column in the design matrix. 0 for orthogonal regressors (usually head motion parameters), -1 for polynomial basis (capturing slow drift of signals), values > 0 for stimulus conditions n_basis: scalar. The number of polynomial bases in the designn matrix. n_stim: scalar. The number of stimulus conditions. n_orth: scalar. The number of orthogoanal regressors (usually head motions) StimLabels: list. The names of each column in the design matrix. """ def __init__(self, fname=None, include_orth=True, include_pols=True): if fname is None: # fname is the name of the file to read in the design matrix self.design = np.zeros([0, 0]) self.n_col = 0 # number of columns (conditions) in the design matrix self.column_types = np.ones(0) self.n_basis = 0 self.n_stim = 0 self.n_orth = 0 self.StimLabels = [] else: # isAFNI = re.match(r'.+[.](1D|1d|txt)$', fname) filename, ext = os.path.splitext(fname) # We assume all AFNI 1D files have extension of 1D or 1d or txt if ext in ['.1D', '.1d', '.txt']: self.read_afni(fname=fname) self.include_orth = include_orth self.include_pols = include_pols # The two flags above dictates whether columns corresponding to # baseline drift modeled by polynomial functions of time and # columns corresponding to other orthogonal signals (usually motion) # are included in nuisance regressors. self.cols_task = np.where(self.column_types == 1)[0] self.design_task = self.design[:, self.cols_task] if np.ndim(self.design_task) == 1: self.design_task = self.design_task[:, None] # part of the design matrix related to task conditions. 
self.n_TR = np.size(self.design_task, axis=0) self.cols_nuisance = np.array([]) if self.include_orth: self.cols_nuisance = np.int0( np.sort(np.append(self.cols_nuisance, np.where(self.column_types == 0)[0]))) if self.include_pols: self.cols_nuisance = np.int0( np.sort(np.append(self.cols_nuisance, np.where(self.column_types == -1)[0]))) if np.size(self.cols_nuisance) > 0: self.reg_nuisance = self.design[:, self.cols_nuisance] if np.ndim(self.reg_nuisance) == 1: self.reg_nuisance = self.reg_nuisance[:, None] else: self.reg_nuisance = None # Nuisance regressors for motion, baseline, etc. def read_afni(self, fname): # Read design file written by AFNI self.n_basis = 0 self.n_stim = 0 self.n_orth = 0 self.StimLabels = [] self.design = np.loadtxt(fname, ndmin=2) with open(fname) as f: all_text = f.read() find_n_column = re.compile( r'^#[ ]+ni_type[ ]+=[ ]+"(?P<n_col>\d+)[*]', re.MULTILINE) n_col_found = find_n_column.search(all_text) if n_col_found: self.n_col = int(n_col_found.group('n_col')) if self.n_col != np.size(self.design, axis=1): warnings.warn( 'The number of columns in the design matrix' + 'does not match the header information') self.n_col = np.size(self.design, axis=1) else: self.n_col = np.size(self.design, axis=1) self.column_types = np.ones(self.n_col) # default that all columns are conditions of interest find_ColumnGroups = re.compile( r'^#[ ]+ColumnGroups[ ]+=[ ]+"(?P<CGtext>.+)"', re.MULTILINE) CG_found = find_ColumnGroups.search(all_text) if CG_found: CG_text = re.split(',', CG_found.group('CGtext')) curr_idx = 0 for CG in CG_text: split_by_at = re.split('@', CG) if len(split_by_at) == 2: # the first tells the number of columns in this condition # the second tells the condition type n_this_cond = int(split_by_at[0]) self.column_types[curr_idx:curr_idx + n_this_cond] = \ int(split_by_at[1]) curr_idx += n_this_cond elif len(split_by_at) == 1 and \ not re.search(r'\..', split_by_at[0]): # Just a number, and not the type like '1..4' self.column_types[curr_idx] = int(split_by_at[0]) curr_idx += 1 else: # must be a single stimulus condition split_by_dots = re.split(r'\..', CG) n_this_cond = int(split_by_dots[1]) self.column_types[curr_idx:curr_idx + n_this_cond] = 1 curr_idx += n_this_cond self.n_basis = np.sum(self.column_types == -1) self.n_stim = np.sum(self.column_types > 0) self.n_orth = np.sum(self.column_types == 0) find_StimLabels = re.compile( r'^#[ ]+StimLabels[ ]+=[ ]+"(?P<SLtext>.+)"', re.MULTILINE) StimLabels_found = find_StimLabels.search(all_text) if StimLabels_found: self.StimLabels = \ re.split(r'[ ;]+', StimLabels_found.group('SLtext')) else: self.StimLabels = [] def gen_design(stimtime_files, scan_duration, TR, style='FSL', temp_res=0.01, hrf_para={'response_delay': 6, 'undershoot_delay': 12, 'response_dispersion': 0.9, 'undershoot_dispersion': 0.9, 'undershoot_scale': 0.035}): """ Generate design matrix based on a list of names of stimulus timing files. The function will read each file, and generate a numpy array of size [time_points \\* condition], where time_points equals duration / TR, and condition is the size of stimtime_filenames. Each column is the hypothetical fMRI response based on the stimulus timing in the corresponding file of stimtime_files. This function uses generate_stimfunction and double_gamma_hrf of brainiak.utils.fmrisim. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. 
The contents in the files will be interpretated based on the style parameter. Details are explained under the style parameter. scan_duration: float or a list (or a 1D numpy array) of numbers. Total duration of each fMRI scan, in unit of seconds. If there are multiple runs, the duration should be a list (or 1-d numpy array) of numbers. If it is a list, then each number in the list represents the duration of the corresponding scan in the stimtime_files. If only a number is provided, it is assumed that there is only one fMRI scan lasting for scan_duration. TR: float. The sampling period of fMRI, in unit of seconds. style: string, default: 'FSL' Acceptable inputs: 'FSL', 'AFNI' The formating style of the stimtime_files. 'FSL' style has one line for each event of the same condition. Each line contains three numbers. The first number is the onset of the event relative to the onset of the first scan, in units of seconds. (Multiple scans should be treated as a concatenated long scan for the purpose of calculating onsets. However, the design matrix from one scan won't leak into the next). The second number is the duration of the event, in unit of seconds. The third number is the amplitude modulation (or weight) of the response. It is acceptable to not provide the weight, or not provide both duration and weight. In such cases, these parameters will default to 1.0. This code will accept timing files with only 1 or 2 columns for convenience but please note that the FSL package does not allow this 'AFNI' style has one line for each scan (run). Each line has a few triplets in the format of stim_onsets*weight:duration (or simpler, see below), separated by spaces. For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s, modulated by weight of 2.0 and lasts for 1.5s. If some run does not include a single event of a condition (stimulus type), then you can put \\*, or a negative number, or a very large number in that line. Either duration or weight can be neglected. In such cases, they will default to 1.0. For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all means an event starting at 3.0s, lasting for 1.0s, with amplitude modulation of 1.0. temp_res: float, default: 0.01 Temporal resolution of fMRI, in second. hrf_para: dictionary The parameters of the double-Gamma hemodynamic response function. To set different parameters, supply a dictionary with the same set of keys as the default, and replace the corresponding values with the new values. Returns ------- design: 2D numpy array design matrix. 
Each time row represents one TR (fMRI sampling time point) and each column represents one experiment condition, in the order in stimtime_files """ if np.ndim(scan_duration) == 0: scan_duration = [scan_duration] scan_duration = np.array(scan_duration) assert np.all(scan_duration > TR), \ 'scan duration should be longer than a TR' if type(stimtime_files) is str: stimtime_files = [stimtime_files] assert TR > 0, 'TR should be positive' assert style == 'FSL' or style == 'AFNI', 'style can only be FSL or AFNI' n_C = len(stimtime_files) # number of conditions n_S = np.size(scan_duration) # number of scans if n_S > 1: design = [np.empty([int(np.round(duration / TR)), n_C]) for duration in scan_duration] else: design = [np.empty([int(np.round(scan_duration / TR)), n_C])] scan_onoff = np.insert(np.cumsum(scan_duration), 0, 0) if style == 'FSL': design_info = _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff) elif style == 'AFNI': design_info = _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff) response_delay = hrf_para['response_delay'] undershoot_delay = hrf_para['undershoot_delay'] response_disp = hrf_para['response_dispersion'] undershoot_disp = hrf_para['undershoot_dispersion'] undershoot_scale = hrf_para['undershoot_scale'] # generate design matrix for i_s in range(n_S): for i_c in range(n_C): if len(design_info[i_s][i_c]['onset']) > 0: stimfunction = generate_stimfunction( onsets=design_info[i_s][i_c]['onset'], event_durations=design_info[i_s][i_c]['duration'], total_time=scan_duration[i_s], weights=design_info[i_s][i_c]['weight'], temporal_resolution=1.0/temp_res) hrf = _double_gamma_hrf(response_delay=response_delay, undershoot_delay=undershoot_delay, response_dispersion=response_disp, undershoot_dispersion=undershoot_disp, undershoot_scale=undershoot_scale, temporal_resolution=1.0/temp_res) design[i_s][:, i_c] = convolve_hrf( stimfunction, TR, hrf_type=hrf, scale_function=False, temporal_resolution=1.0 / temp_res).transpose() * temp_res else: design[i_s][:, i_c] = 0.0 # We multiply the resulting design matrix with # the temporal resolution to normalize it. # We do not use the internal normalization # in double_gamma_hrf because it does not guarantee # normalizing with the same constant. return np.concatenate(design, axis=0) def _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff): """ Utility called by gen_design. It reads in one or more stimulus timing file comforming to FSL style, and return a list (size of [#run \\* #condition]) of dictionary including onsets, durations and weights of each event. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files should follow the style of FSL stimulus timing files, refer to gen_design. n_C: integer, number of task conditions n_S: integer, number of scans scan_onoff: list of numbers. The onset of each scan after concatenating all scans, together with the offset of the last scan. For example, if 3 scans of duration 100s, 150s, 120s are run, scan_onoff is [0, 100, 250, 370] Returns ------- design_info: list of stimulus information The first level of the list correspond to different scans. The second level of the list correspond to different conditions. Each item in the list is a dictiornary with keys "onset", "duration" and "weight". If one condition includes no event in a scan, the values of these keys in that scan of the condition are empty lists. 
See also -------- gen_design """ design_info = [[{'onset': [], 'duration': [], 'weight': []} for i_c in range(n_C)] for i_s in range(n_S)] # Read stimulus timing files for i_c in range(n_C): with open(stimtime_files[i_c]) as f: for line in f.readlines(): tmp = line.strip().split() i_s = np.where( np.logical_and(scan_onoff[:-1] <= float(tmp[0]), scan_onoff[1:] > float(tmp[0])))[0] if len(i_s) == 1: i_s = i_s[0] design_info[i_s][i_c]['onset'].append(float(tmp[0]) - scan_onoff[i_s]) if len(tmp) >= 2: design_info[i_s][i_c]['duration'].append(float(tmp[1])) else: design_info[i_s][i_c]['duration'].append(1.0) if len(tmp) >= 3: design_info[i_s][i_c]['weight'].append(float(tmp[2])) else: design_info[i_s][i_c]['weight'].append(1.0) return design_info def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff): """ Utility called by gen_design. It reads in one or more stimulus timing file comforming to AFNI style, and return a list (size of ``[number of runs \\* number of conditions]``) of dictionary including onsets, durations and weights of each event. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files should follow the style of AFNI stimulus timing files, refer to gen_design. n_C: integer, number of task conditions n_S: integer, number of scans scan_onoff: list of numbers. The onset of each scan after concatenating all scans, together with the offset of the last scan. For example, if 3 scans of duration 100s, 150s, 120s are run, scan_onoff is [0, 100, 250, 370] Returns ------- design_info: list of stimulus information The first level of the list correspond to different scans. The second level of the list correspond to different conditions. Each item in the list is a dictiornary with keys "onset", "duration" and "weight". If one condition includes no event in a scan, the values of these keys in that scan of the condition are empty lists. See also -------- gen_design """ design_info = [[{'onset': [], 'duration': [], 'weight': []} for i_c in range(n_C)] for i_s in range(n_S)] # Read stimulus timing files for i_c in range(n_C): with open(stimtime_files[i_c]) as f: text = f.readlines() assert len(text) == n_S, \ 'Number of lines does not match number of runs!' for i_s, line in enumerate(text): events = line.strip().split() if events[0] == '*': continue for event in events: assert event != '*' tmp = str.split(event, ':') if len(tmp) == 2: duration = float(tmp[1]) else: duration = 1.0 tmp = str.split(tmp[0], '*') if len(tmp) == 2: weight = float(tmp[1]) else: weight = 1.0 if (float(tmp[0]) >= 0 and float(tmp[0]) < scan_onoff[i_s + 1] - scan_onoff[i_s]): design_info[i_s][i_c]['onset'].append(float(tmp[0])) design_info[i_s][i_c]['duration'].append(duration) design_info[i_s][i_c]['weight'].append(weight) return design_info def center_mass_exp(interval, scale=1.0): """ Calculate the center of mass of negative exponential distribution p(x) = exp(-x / scale) / scale in the interval of (interval_left, interval_right). scale is the same scale parameter as scipy.stats.expon.pdf Parameters ---------- interval: size 2 tuple, float interval must be in the form of (interval_left, interval_right), where interval_left/interval_right is the starting/end point of the interval in which the center of mass is calculated for exponential distribution. 
Note that interval_left must be non-negative, since exponential is not supported in the negative domain, and interval_right must be bigger than interval_left (thus positive) to form a well-defined interval. scale: float, positive The scale parameter of the exponential distribution. See above. Returns ------- m: float The center of mass in the interval of (interval_left, interval_right) for exponential distribution. """ assert isinstance(interval, tuple), 'interval must be a tuple' assert len(interval) == 2, 'interval must be length two' (interval_left, interval_right) = interval assert interval_left >= 0, 'interval_left must be non-negative' assert interval_right > interval_left, \ 'interval_right must be bigger than interval_left' assert scale > 0, 'scale must be positive' if interval_right < np.inf: return ((interval_left + scale) * np.exp(-interval_left / scale) - ( scale + interval_right) * np.exp(-interval_right / scale)) / ( np.exp(-interval_left / scale) - np.exp(-interval_right / scale)) else: return interval_left + scale def usable_cpu_count(): """Get number of CPUs usable by the current process. Takes into consideration cpusets restrictions. Returns ------- int """ try: result = len(os.sched_getaffinity(0)) except AttributeError: try: result = len(psutil.Process().cpu_affinity()) except AttributeError: result = os.cpu_count() return result def phase_randomize(data, voxelwise=False, random_state=None): """Randomize phase of time series across subjects For each subject, apply Fourier transform to voxel time series and then randomly shift the phase of each frequency before inverting back into the time domain. This yields time series with the same power spectrum (and thus the same autocorrelation) as the original time series but will remove any meaningful temporal relationships among time series across subjects. By default (voxelwise=False), the same phase shift is applied across all voxels; however if voxelwise=True, different random phase shifts are applied to each voxel. The typical input is a time by voxels by subjects ndarray. The first dimension is assumed to be the time dimension and will be phase randomized. If a 2-dimensional ndarray is provided, the last dimension is assumed to be subjects, and different phase randomizations will be applied to each subject. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. Parameters ---------- data : ndarray (n_TRs x n_voxels x n_subjects) Data to be phase randomized (per subject) voxelwise : bool, default: False Apply same (False) or different (True) randomizations across voxels random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. Returns ---------- shifted_data : ndarray (n_TRs x n_voxels x n_subjects) Phase-randomized time series """ # Check if input is 2-dimensional data_ndim = data.ndim # Get basic shape of data data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) # Random seed to be deterministically re-randomized at each iteration if isinstance(random_state, np.random.RandomState): prng = random_state else: prng = np.random.RandomState(random_state) # Get randomized phase shifts if n_TRs % 2 == 0: # Why are we indexing from 1 not zero here? n_TRs / -1 long? 
pos_freq = np.arange(1, data.shape[0] // 2) neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1) else: pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1) neg_freq = np.arange(data.shape[0] - 1, (data.shape[0] - 1) // 2, -1) if not voxelwise: phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects) * 2 * np.math.pi) else: phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects) * 2 * np.math.pi) # Fast Fourier transform along time dimension of data fft_data = fft(data, axis=0) # Shift pos and neg frequencies symmetrically, to keep signal real fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts) fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts) # Inverse FFT to put data back in time domain shifted_data = np.real(ifft(fft_data, axis=0)) # Go back to 2-dimensions if input was 2-dimensional if data_ndim == 2: shifted_data = shifted_data[:, 0, :] return shifted_data def p_from_null(observed, distribution, side='two-sided', exact=False, axis=None): """Compute p-value from null distribution Returns the p-value for an observed test statistic given a null distribution. Performs either a 'two-sided' (i.e., two-tailed) test (default) or a one-sided (i.e., one-tailed) test for either the 'left' or 'right' side. For an exact test (exact=True), does not adjust for the observed test statistic; otherwise, adjusts for observed test statistic (prevents p-values of zero). If a multidimensional distribution is provided, use axis argument to specify which axis indexes resampling iterations. The implementation is based on the work in [PhipsonSmyth2010]_. .. [PhipsonSmyth2010] "Permutation p-values should never be zero: calculating exact p-values when permutations are randomly drawn.", B. Phipson, G. K., Smyth, 2010, Statistical Applications in Genetics and Molecular Biology, 9, 1544-6115. https://doi.org/10.2202/1544-6115.1585 Parameters ---------- observed : float Observed test statistic distribution : ndarray Null distribution of test statistic side : str, default: 'two-sided' Perform one-sided ('left' or 'right') or 'two-sided' test axis: None or int, default: None Axis indicating resampling iterations in input distribution Returns ------- p : float p-value for observed test statistic based on null distribution """ if side not in ('two-sided', 'left', 'right'): raise ValueError("The value for 'side' must be either " "'two-sided', 'left', or 'right', got {0}". format(side)) n_samples = len(distribution) logger.info("Assuming {0} resampling iterations".format(n_samples)) if side == 'two-sided': # Numerator for two-sided test numerator = np.sum(np.abs(distribution) >= np.abs(observed), axis=axis) elif side == 'left': # Numerator for one-sided test in left tail numerator = np.sum(distribution <= observed, axis=axis) elif side == 'right': # Numerator for one-sided test in right tail numerator = np.sum(distribution >= observed, axis=axis) # If exact test all possible permutations and do not adjust if exact: p = numerator / n_samples # If not exact test, adjust number of samples to account for # observed statistic; prevents p-value from being zero else: p = (numerator + 1) / (n_samples + 1) return p def _check_timeseries_input(data): """Checks response time series input data (e.g., for ISC analysis) Input data should be a n_TRs by n_voxels by n_subjects ndarray (e.g., brainiak.image.MaskedMultiSubjectData) or a list where each item is a n_TRs by n_voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. 
If a 2D array is supplied, the last dimension is assumed to correspond to subjects. This function is generally intended to be used internally by other functions module (e.g., isc, isfc in brainiak.isc). Parameters ---------- data : ndarray or list Time series data Returns ------- data : ndarray Input time series data with standardized structure n_TRs : int Number of time points (TRs) n_voxels : int Number of voxels (or ROIs) n_subjects : int Number of subjects """ # Convert list input to 3d and check shapes if type(data) == list: data_shape = data[0].shape for i, d in enumerate(data): if d.shape != data_shape: raise ValueError("All ndarrays in input list " "must be the same shape!") if d.ndim == 1: data[i] = d[:, np.newaxis] data = np.dstack(data) # Convert input ndarray to 3d and check shape elif isinstance(data, np.ndarray): if data.ndim == 2: data = data[:, np.newaxis, :] elif data.ndim == 3: pass else: raise ValueError("Input ndarray should have 2 " "or 3 dimensions (got {0})!".format(data.ndim)) # Infer subjects, TRs, voxels and log for user to check n_TRs, n_voxels, n_subjects = data.shape logger.info("Assuming {0} subjects with {1} time points " "and {2} voxel(s) or ROI(s) for ISC analysis.".format( n_subjects, n_TRs, n_voxels)) return data, n_TRs, n_voxels, n_subjects def array_correlation(x, y, axis=0): """Column- or row-wise Pearson correlation between two arrays Computes sample Pearson correlation between two 1D or 2D arrays (e.g., two n_TRs by n_voxels arrays). For 2D arrays, computes correlation between each corresponding column (axis=0) or row (axis=1) where axis indexes observations. If axis=0 (default), each column is considered to be a variable and each row is an observation; if axis=1, each row is a variable and each column is an observation (equivalent to transposing the input arrays). Input arrays must be the same shape with corresponding variables and observations. This is intended to be an efficient method for computing correlations between two corresponding arrays with many variables (e.g., many voxels). Parameters ---------- x : 1D or 2D ndarray Array of observations for one or more variables y : 1D or 2D ndarray Array of observations for one or more variables (same shape as x) axis : int (0 or 1), default: 0 Correlation between columns (axis=0) or rows (axis=1) Returns ------- r : float or 1D ndarray Pearson correlation values for input variables """ # Accommodate array-like inputs if not isinstance(x, np.ndarray): x = np.asarray(x) if not isinstance(y, np.ndarray): y = np.asarray(y) # Check that inputs are same shape if x.shape != y.shape: raise ValueError("Input arrays must be the same shape") # Transpose if axis=1 requested (to avoid broadcasting # issues introduced by switching axis in mean and sum) if axis == 1: x, y = x.T, y.T # Center (de-mean) input variables x_demean = x - np.mean(x, axis=0) y_demean = y - np.mean(y, axis=0) # Compute summed product of centered variables numerator = np.sum(x_demean * y_demean, axis=0) # Compute sum squared error denominator = np.sqrt(np.sum(x_demean ** 2, axis=0) * np.sum(y_demean ** 2, axis=0)) return numerator / denominator
apache-2.0
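As a quick sanity check of the column-wise Pearson correlation implemented by array_correlation() above, the same computation can be written out directly in numpy and compared against np.corrcoef (random data used purely for illustration):

```python
# Column-wise Pearson correlation: demean, summed product, normalize by column norms.
import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(100, 3)               # 100 observations, 3 variables (columns)
y = rng.randn(100, 3)

x_demean = x - x.mean(axis=0)
y_demean = y - y.mean(axis=0)
r = (x_demean * y_demean).sum(axis=0) / np.sqrt(
    (x_demean ** 2).sum(axis=0) * (y_demean ** 2).sum(axis=0))

# Matches the per-column values from np.corrcoef
for i in range(3):
    assert np.isclose(r[i], np.corrcoef(x[:, i], y[:, i])[0, 1])
print(r)
```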
beiko-lab/gengis
bin/Lib/site-packages/scipy/interpolate/__init__.py
1
3385
""" ======================================== Interpolation (:mod:`scipy.interpolate`) ======================================== .. currentmodule:: scipy.interpolate Sub-package for objects used in interpolation. As listed below, this sub-package contains spline functions and classes, one-dimensional and multi-dimensional (univariate and multivariate) interpolation classes, Lagrange and Taylor polynomial interpolators, and wrappers for `FITPACK <http://www.cisl.ucar.edu/softlib/FITPACK.html>`_ and DFITPACK functions. Univariate interpolation ======================== .. autosummary:: :toctree: generated/ interp1d BarycentricInterpolator KroghInterpolator PiecewisePolynomial PchipInterpolator barycentric_interpolate krogh_interpolate piecewise_polynomial_interpolate pchip_interpolate Multivariate interpolation ========================== Unstructured data: .. autosummary:: :toctree: generated/ griddata LinearNDInterpolator NearestNDInterpolator CloughTocher2DInterpolator Rbf interp2d For data on a grid: .. autosummary:: RectBivariateSpline .. seealso:: `scipy.ndimage.map_coordinates` 1-D Splines =========== .. autosummary:: :toctree: generated/ UnivariateSpline InterpolatedUnivariateSpline LSQUnivariateSpline The above univariate spline classes have the following methods: .. autosummary:: UnivariateSpline.__call__ UnivariateSpline.derivatives UnivariateSpline.integral UnivariateSpline.roots UnivariateSpline.derivative UnivariateSpline.antiderivative UnivariateSpline.get_coeffs UnivariateSpline.get_knots UnivariateSpline.get_residual UnivariateSpline.set_smoothing_factor Functional interface to FITPACK functions: .. autosummary:: :toctree: generated/ splrep splprep splev splint sproot spalde splder splantider bisplrep bisplev 2-D Splines =========== For data on a grid: .. autosummary:: :toctree: generated/ RectBivariateSpline RectSphereBivariateSpline For unstructured data: .. autosummary:: :toctree: generated/ BivariateSpline SmoothBivariateSpline SmoothSphereBivariateSpline LSQBivariateSpline LSQSphereBivariateSpline Low-level interface to FITPACK functions: .. autosummary:: :toctree: generated/ bisplrep bisplev Additional tools ================ .. autosummary:: :toctree: generated/ lagrange approximate_taylor_polynomial .. seealso:: `scipy.ndimage.map_coordinates`, `scipy.ndimage.spline_filter`, `scipy.signal.resample`, `scipy.signal.bspline`, `scipy.signal.gauss_spline`, `scipy.signal.qspline1d`, `scipy.signal.cspline1d`, `scipy.signal.qspline1d_eval`, `scipy.signal.cspline1d_eval`, `scipy.signal.qspline2d`, `scipy.signal.cspline2d`. """ from __future__ import division, print_function, absolute_import from .interpolate import * from .fitpack import * # New interface to fitpack library: from .fitpack2 import * from .rbf import Rbf from .polyint import * from .ndgriddata import * __all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench
gpl-3.0
thumbimigwe/echorizr
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py
2375
11628
"""Utilities for writing code that runs on Python 2 and 3""" #Copyright (c) 2010-2011 Benjamin Peterson #Permission is hereby granted, free of charge, to any person obtaining a copy of #this software and associated documentation files (the "Software"), to deal in #the Software without restriction, including without limitation the rights to #use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of #the Software, and to permit persons to whom the Software is furnished to do so, #subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS #FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR #COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER #IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN #CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import operator import sys import types __author__ = "Benjamin Peterson <[email protected]>" __version__ = "1.2.0" # Revision 41c74fef2ded # True if we are running on Python 3. PY3 = sys.version_info[0] == 3 if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # This is a bit ugly, but it avoids running this again. 
delattr(tp, self.name) return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _MovedItems(types.ModuleType): """Lazy loading of moved objects""" _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) del attr moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except 
AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_code = "__code__" _func_defaults = "__defaults__" _iterkeys = "keys" _itervalues = "values" _iteritems = "items" else: _meth_func = "im_func" _meth_self = "im_self" _func_code = "func_code" _func_defaults = "func_defaults" _iterkeys = "iterkeys" _itervalues = "itervalues" _iteritems = "iteritems" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator if PY3: def get_unbound_function(unbound): return unbound Iterator = object def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) else: def get_unbound_function(unbound): return unbound.im_func class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) def iterkeys(d): """Return an iterator over the keys of a dictionary.""" return iter(getattr(d, _iterkeys)()) def itervalues(d): """Return an iterator over the values of a dictionary.""" return iter(getattr(d, _itervalues)()) def iteritems(d): """Return an iterator over the (key, value) pairs of a dictionary.""" return iter(getattr(d, _iteritems)()) if PY3: def b(s): return s.encode("latin-1") def u(s): return s if sys.version_info[1] <= 1: def int2byte(i): return bytes((i,)) else: # This is about 2x faster than the implementation above on 3.2+ int2byte = operator.methodcaller("to_bytes", 1, "big") import io StringIO = io.StringIO BytesIO = io.BytesIO else: def b(s): return s def u(s): return unicode(s, "unicode_escape") int2byte = chr import StringIO StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") if PY3: import builtins exec_ = getattr(builtins, "exec") def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value print_ = getattr(builtins, "print") del builtins else: def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) def print_(*args, **kwargs): """The new-style print function.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in 
enumerate(args): if i: write(sep) write(arg) write(end) _add_doc(reraise, """Reraise an exception.""") def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("NewBase", (base,), {})
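A brief sketch of how the helpers defined above are typically used so one code path runs on both Python 2 and 3; the class and dictionary are illustrative, and the module is assumed to be importable as six:

# Version-agnostic idioms built on the helpers above.
import six

class Meta(type):
    pass

class Base(six.with_metaclass(Meta)):       # one class statement for both major versions
    pass

data = {"a": 1, "b": 2}
for key, value in six.iteritems(data):      # no .iteritems()/.items() branching
    six.print_(key, "->", value)            # function-style print on either version

assert isinstance(six.u("text"), six.string_types)
assert isinstance(six.b("bytes"), six.binary_type)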
mit
pyfisch/servo
tests/wpt/web-platform-tests/tools/third_party/py/py/_io/saferepr.py
273
2483
import py import sys builtin_repr = repr reprlib = py.builtin._tryimport('repr', 'reprlib') class SafeRepr(reprlib.Repr): """ subclass of repr.Repr that limits the resulting size of repr() and includes information on exceptions raised during the call. """ def repr(self, x): return self._callhelper(reprlib.Repr.repr, self, x) def repr_unicode(self, x, level): # Strictly speaking wrong on narrow builds def repr(u): if "'" not in u: return py.builtin._totext("'%s'") % u elif '"' not in u: return py.builtin._totext('"%s"') % u else: return py.builtin._totext("'%s'") % u.replace("'", r"\'") s = repr(x[:self.maxstring]) if len(s) > self.maxstring: i = max(0, (self.maxstring-3)//2) j = max(0, self.maxstring-3-i) s = repr(x[:i] + x[len(x)-j:]) s = s[:i] + '...' + s[len(s)-j:] return s def repr_instance(self, x, level): return self._callhelper(builtin_repr, x) def _callhelper(self, call, x, *args): try: # Try the vanilla repr and make sure that the result is a string s = call(x, *args) except py.builtin._sysex: raise except: cls, e, tb = sys.exc_info() exc_name = getattr(cls, '__name__', 'unknown') try: exc_info = str(e) except py.builtin._sysex: raise except: exc_info = 'unknown' return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( exc_name, exc_info, x.__class__.__name__, id(x)) else: if len(s) > self.maxsize: i = max(0, (self.maxsize-3)//2) j = max(0, self.maxsize-3-i) s = s[:i] + '...' + s[len(s)-j:] return s def saferepr(obj, maxsize=240): """ return a size-limited safe repr-string for the given object. Failing __repr__ functions of user instances will be represented with a short exception info and 'saferepr' generally takes care to never raise exceptions itself. This function is a wrapper around the Repr/reprlib functionality of the standard 2.6 lib. """ # review exception handling srepr = SafeRepr() srepr.maxstring = maxsize srepr.maxsize = maxsize srepr.maxother = 160 return srepr.repr(obj)
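A usage sketch of the saferepr() helper defined above, assuming it is in scope (it ships with the py library); the failing __repr__ class is an illustrative assumption:

# saferepr never raises, even when the object's __repr__ does,
# and it truncates oversized representations.
class Broken(object):
    def __repr__(self):
        raise RuntimeError("boom")

print(saferepr(Broken()))                # '<[RuntimeError("boom") raised in repr()] Broken object at 0x...>'
print(saferepr("x" * 1000))              # truncated with '...' to stay within maxsize
print(saferepr(list(range(100)), maxsize=40))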
mpl-2.0
azoft-dev-team/imagrium
env/Lib/test/test_format.py
12
10507
from test.test_support import verbose, have_unicode, TestFailed, is_jython import sys # test string formatting operator (I am not sure if this is being tested # elsewhere but, surely, some of the given cases are *not* tested because # they crash python) # test on unicode strings as well overflowok = 1 def testformat(formatstr, args, output=None): if verbose: if output: print "%s %% %s =? %s ..." %\ (repr(formatstr), repr(args), repr(output)), else: print "%s %% %s works? ..." % (repr(formatstr), repr(args)), try: result = formatstr % args except OverflowError: if not overflowok: raise if verbose: print 'overflow (this is fine)' else: if output and result != output: if verbose: print 'no' print "%s %% %s == %s != %s" %\ (repr(formatstr), repr(args), repr(result), repr(output)) else: if verbose: print 'yes' def testboth(formatstr, *args): testformat(formatstr, *args) if have_unicode: testformat(unicode(formatstr), *args) testboth("%.1d", (1,), "1") testboth("%.*d", (sys.maxint,1)) # expect overflow testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001') testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001') testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001') testboth("%f", (1.0,), "1.000000") # these are trying to test the limits of the internal magic-number-length # formatting buffer, if that number changes then these tests are less # effective testboth("%#.*g", (109, -1.e+49/3.)) testboth("%#.*g", (110, -1.e+49/3.)) testboth("%#.*g", (110, -1.e+100/3.)) # test some ridiculously large precision, expect overflow testboth('%12.*f', (123456, 1.0)) # Formatting of long integers. 
Overflow is not ok overflowok = 0 testboth("%x", 10L, "a") testboth("%x", 100000000000L, "174876e800") testboth("%o", 10L, "12") testboth("%o", 100000000000L, "1351035564000") testboth("%d", 10L, "10") testboth("%d", 100000000000L, "100000000000") big = 123456789012345678901234567890L testboth("%d", big, "123456789012345678901234567890") testboth("%d", -big, "-123456789012345678901234567890") testboth("%5d", -big, "-123456789012345678901234567890") testboth("%31d", -big, "-123456789012345678901234567890") testboth("%32d", -big, " -123456789012345678901234567890") testboth("%-32d", -big, "-123456789012345678901234567890 ") testboth("%032d", -big, "-0123456789012345678901234567890") testboth("%-032d", -big, "-123456789012345678901234567890 ") testboth("%034d", -big, "-000123456789012345678901234567890") testboth("%034d", big, "0000123456789012345678901234567890") testboth("%0+34d", big, "+000123456789012345678901234567890") testboth("%+34d", big, " +123456789012345678901234567890") testboth("%34d", big, " 123456789012345678901234567890") testboth("%.2d", big, "123456789012345678901234567890") testboth("%.30d", big, "123456789012345678901234567890") testboth("%.31d", big, "0123456789012345678901234567890") testboth("%32.31d", big, " 0123456789012345678901234567890") big = 0x1234567890abcdef12345L # 21 hex digits testboth("%x", big, "1234567890abcdef12345") testboth("%x", -big, "-1234567890abcdef12345") testboth("%5x", -big, "-1234567890abcdef12345") testboth("%22x", -big, "-1234567890abcdef12345") testboth("%23x", -big, " -1234567890abcdef12345") testboth("%-23x", -big, "-1234567890abcdef12345 ") testboth("%023x", -big, "-01234567890abcdef12345") testboth("%-023x", -big, "-1234567890abcdef12345 ") testboth("%025x", -big, "-0001234567890abcdef12345") testboth("%025x", big, "00001234567890abcdef12345") testboth("%0+25x", big, "+0001234567890abcdef12345") testboth("%+25x", big, " +1234567890abcdef12345") testboth("%25x", big, " 1234567890abcdef12345") testboth("%.2x", big, "1234567890abcdef12345") testboth("%.21x", big, "1234567890abcdef12345") testboth("%.22x", big, "01234567890abcdef12345") testboth("%23.22x", big, " 01234567890abcdef12345") testboth("%-23.22x", big, "01234567890abcdef12345 ") testboth("%X", big, "1234567890ABCDEF12345") testboth("%#X", big, "0X1234567890ABCDEF12345") testboth("%#x", big, "0x1234567890abcdef12345") testboth("%#x", -big, "-0x1234567890abcdef12345") testboth("%#.23x", -big, "-0x001234567890abcdef12345") testboth("%#+.23x", big, "+0x001234567890abcdef12345") testboth("%# .23x", big, " 0x001234567890abcdef12345") testboth("%#+.23X", big, "+0X001234567890ABCDEF12345") testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345") testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345") testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ") testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345") # next one gets two leading zeroes from precision, and another from the # 0 flag and the width testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345") # same, except no 0 flag testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345") big = 012345670123456701234567012345670L # 32 octal digits testboth("%o", big, "12345670123456701234567012345670") testboth("%o", -big, "-12345670123456701234567012345670") testboth("%5o", -big, "-12345670123456701234567012345670") testboth("%33o", -big, "-12345670123456701234567012345670") testboth("%34o", -big, " -12345670123456701234567012345670") testboth("%-34o", -big, "-12345670123456701234567012345670 ") testboth("%034o", -big, 
"-012345670123456701234567012345670") testboth("%-034o", -big, "-12345670123456701234567012345670 ") testboth("%036o", -big, "-00012345670123456701234567012345670") testboth("%036o", big, "000012345670123456701234567012345670") testboth("%0+36o", big, "+00012345670123456701234567012345670") testboth("%+36o", big, " +12345670123456701234567012345670") testboth("%36o", big, " 12345670123456701234567012345670") testboth("%.2o", big, "12345670123456701234567012345670") testboth("%.32o", big, "12345670123456701234567012345670") testboth("%.33o", big, "012345670123456701234567012345670") testboth("%34.33o", big, " 012345670123456701234567012345670") testboth("%-34.33o", big, "012345670123456701234567012345670 ") testboth("%o", big, "12345670123456701234567012345670") testboth("%#o", big, "012345670123456701234567012345670") testboth("%#o", -big, "-012345670123456701234567012345670") testboth("%#.34o", -big, "-0012345670123456701234567012345670") testboth("%#+.34o", big, "+0012345670123456701234567012345670") testboth("%# .34o", big, " 0012345670123456701234567012345670") testboth("%#+.34o", big, "+0012345670123456701234567012345670") testboth("%#-+.34o", big, "+0012345670123456701234567012345670") testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ") testboth("%#+37.34o", big, " +0012345670123456701234567012345670") # next one gets one leading zero from precision testboth("%.33o", big, "012345670123456701234567012345670") # base marker shouldn't change that, since "0" is redundant testboth("%#.33o", big, "012345670123456701234567012345670") # but reduce precision, and base marker should add a zero testboth("%#.32o", big, "012345670123456701234567012345670") # one leading zero from precision, and another from "0" flag & width testboth("%034.33o", big, "0012345670123456701234567012345670") # base marker shouldn't change that testboth("%0#34.33o", big, "0012345670123456701234567012345670") # Some small ints, in both Python int and long flavors). testboth("%d", 42, "42") testboth("%d", -42, "-42") testboth("%d", 42L, "42") testboth("%d", -42L, "-42") testboth("%#x", 1, "0x1") testboth("%#x", 1L, "0x1") testboth("%#X", 1, "0X1") testboth("%#X", 1L, "0X1") testboth("%#o", 1, "01") testboth("%#o", 1L, "01") testboth("%#o", 0, "0") testboth("%#o", 0L, "0") testboth("%o", 0, "0") testboth("%o", 0L, "0") testboth("%d", 0, "0") testboth("%d", 0L, "0") testboth("%#x", 0, "0x0") testboth("%#x", 0L, "0x0") testboth("%#X", 0, "0X0") testboth("%#X", 0L, "0X0") testboth("%x", 0x42, "42") testboth("%x", -0x42, "-42") testboth("%x", 0x42L, "42") testboth("%x", -0x42L, "-42") testboth("%o", 042, "42") testboth("%o", -042, "-42") testboth("%o", 042L, "42") testboth("%o", -042L, "-42") # Test exception for unknown format characters if verbose: print 'Testing exceptions' def test_exc(formatstr, args, exception, excmsg): try: testformat(formatstr, args) except exception, exc: if str(exc) == excmsg: if verbose: print "yes" else: if verbose: print 'no' print 'Unexpected ', exception, ':', repr(str(exc)) except: if verbose: print 'no' print 'Unexpected exception' raise else: raise TestFailed, 'did not get expected exception: %s' % excmsg test_exc('abc %a', 1, ValueError, "unsupported format character 'a' (0x61) at index 5") if have_unicode: test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError, "unsupported format character '?' 
(0x3000) at index 5") test_exc('%d', '1', TypeError, "int argument required") test_exc('%g', '1', TypeError, "float argument required") test_exc('no format', '1', TypeError, "not all arguments converted during string formatting") test_exc('no format', u'1', TypeError, "not all arguments converted during string formatting") test_exc(u'no format', '1', TypeError, "not all arguments converted during string formatting") test_exc(u'no format', u'1', TypeError, "not all arguments converted during string formatting") # for Jython, do we really need to support this? what's the use case # here! the problem in a nutshell is that it changes __oct__, __hex__ # such that they don't return a string, but later on the exception # will occur anyway. so seems like a lot of work for no value # class Foobar(long): # def __oct__(self): # # Returning a non-string should not blow up. # return self + 1 #test_exc('%o', Foobar(), TypeError, # "expected string or Unicode object, long found") if sys.maxint == 2**31-1 and not is_jython: # crashes 2.2.1 and earlier: try: "%*d"%(sys.maxint, -127) except MemoryError: pass else: raise TestFailed, '"%*d"%(sys.maxint, -127) should fail'
mit
vlinhd11/vlinhd11-android-scripting
python/src/Tools/modulator/ScrolledListbox.py
37
1477
# A ScrolledList widget feels like a list widget but also has a # vertical scroll bar on its right. (Later, options may be added to # add a horizontal bar as well, to make the bars disappear # automatically when not needed, to move them to the other side of the # window, etc.) # # Configuration options are passed to the List widget. # A Frame widget is inserted between the master and the list, to hold # the Scrollbar widget. # Most method calls are inherited from the List widget; Pack methods # are redirected to the Frame widget however. from Tkinter import * from Tkinter import _cnfmerge class ScrolledListbox(Listbox): def __init__(self, master=None, cnf={}): cnf = _cnfmerge(cnf) fcnf = {} vcnf = {'name': 'vbar', Pack: {'side': 'right', 'fill': 'y'},} for k in cnf.keys(): if type(k) == ClassType or k == 'name': fcnf[k] = cnf[k] del cnf[k] self.frame = Frame(master, fcnf) self.vbar = Scrollbar(self.frame, vcnf) cnf[Pack] = {'side': 'left', 'fill': 'both', 'expand': 'yes'} cnf['name'] = 'list' Listbox.__init__(self, self.frame, cnf) self['yscrollcommand'] = (self.vbar, 'set') self.vbar['command'] = (self, 'yview') # Copy Pack methods of self.frame -- hack! for m in Pack.__dict__.keys(): if m[0] != '_' and m != 'config': setattr(self, m, getattr(self.frame, m))
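For comparison, the same wiring in modern Python 3 tkinter; the widget layout and item labels are illustrative:

# Python 3 sketch: a Listbox with an attached vertical Scrollbar.
import tkinter as tk

root = tk.Tk()
frame = tk.Frame(root)
frame.pack(fill="both", expand=True)

vbar = tk.Scrollbar(frame, orient="vertical")
listbox = tk.Listbox(frame, yscrollcommand=vbar.set)
vbar.config(command=listbox.yview)

vbar.pack(side="right", fill="y")
listbox.pack(side="left", fill="both", expand=True)

for i in range(100):
    listbox.insert("end", "item %d" % i)

root.mainloop()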
apache-2.0
mhum/ynab-enhanced
src/common/res/features/l10n/init.py
1
3511
#!/usr/bin/env python """Prepare and download l10ns.""" import urllib, urllib2 import shutil import os import zipfile import json import sys import math if len(sys.argv) != 2: print '' print 'ERROR:' print '' print 'Please supply a crowd in API key, obtained on this page:' print 'http://translate.toolkitforynab.com/project/toolkit-for-ynab/settings#integration\n' print 'Example: ./get_l10ns <api key>' print '' exit(1) ID = 'toolkit-for-ynab' KEY = sys.argv[1:][0] API_PREFIX = 'https://api.crowdin.com/api/project/%s/' % ID KEY_SUFFIX = '?key=%s' % KEY FILENAME = 'all.zip' DEST_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'locales') def export_l10ns(): """Force crowding to export l10ns.""" url = API_PREFIX + 'export' + KEY_SUFFIX response = urllib2.urlopen(url) html = response.read() return (html.find('success status') >= 0) def donwload_l10ns(): """Download all l10ns in zip archive.""" url = API_PREFIX + 'download/' + FILENAME + KEY_SUFFIX l10ns_file = urllib2.urlopen(url) with open('all.zip','wb') as f: f.write(l10ns_file.read()) return True def get_l10ns_stats(): url = API_PREFIX + "status" + KEY_SUFFIX + "&json=true" response = urllib2.urlopen(url) j = response.read() lang_completed = {} for i in json.loads(j): lang_completed[i['name']] = int(math.ceil(int(i["words_translated"])/float(i["words"])*100)) return lang_completed def unpack(lang_completed): """Unpack l10ns, move to one folder, add js initializer.""" os.path.isdir(DEST_DIR) and shutil.rmtree(DEST_DIR) zipfile.ZipFile(FILENAME).extractall(DEST_DIR) for root, dirs, files in os.walk(DEST_DIR): for name in files: if lang_completed[name.split('.')[0]] != 0: shutil.move(os.path.join(root, name), DEST_DIR) # Prepend all JSONs with Ember declaration. with open(os.path.join(DEST_DIR, name), 'r+') as f: content = f.read() f.seek(0, 0) f.write('ynabToolKit.l10nData = ' + content) for root, dirs, files in os.walk(DEST_DIR): for name in dirs: shutil.rmtree(os.path.join(root, name)) os.remove(FILENAME) def create_settings(lang_completed): """Generate settings.json file.""" settings = { "name": "l10n", "type": "select", "default": "0", "section": "general", "title": "Localization of YNAB", "description": "Localization of interface.", "options": [ { "name": "Default", "value": "0" } ], "actions": {}} for root, dirs, files in os.walk(DEST_DIR): for name in files: if lang_completed[name.split('.')[0]] != 0: value = name.split('.')[0].lower() percent = ' (%s%%)' % str(int(lang_completed[name.split('.')[0]])) settings['options'].append({ "name": name.split('.')[0] + percent, "value": value }) settings['actions'][value] = ["injectScript", "locales/" + name, "injectScript", "main.js"] with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'settings.json'), 'w') as f: json.dump(settings, f, indent=4) lang_completed = get_l10ns_stats() export_l10ns() donwload_l10ns() unpack(lang_completed) create_settings(lang_completed)
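The same export/status/download sequence, sketched with the requests library for reference; the endpoints mirror the script above and the API key is a placeholder, not a real credential:

# Sketch of the Crowdin calls above using requests (Python 3).
import requests

PROJECT = "toolkit-for-ynab"
API_KEY = "<api key>"   # placeholder
BASE = "https://api.crowdin.com/api/project/%s/" % PROJECT

# 1. Ask Crowdin to build a fresh export of all translations.
requests.get(BASE + "export", params={"key": API_KEY})

# 2. Fetch per-language completion statistics as JSON.
status = requests.get(BASE + "status", params={"key": API_KEY, "json": "true"}).json()

# 3. Download the zip archive containing every localization file.
with open("all.zip", "wb") as f:
    f.write(requests.get(BASE + "download/all.zip", params={"key": API_KEY}).content)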
mit
sssstest/GameEditor
GameTrigger.py
1
1438
#!/usr/bin/env python #@section License # #Copyright (C) 2013 ssss #This file is a part of the GameEditor. # #This program is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. from GameResource import * class GameTrigger(GameResource): if Class: MomentMiddle=0 MomentBegin=1 MomentEnd=2 defaults={"id":-1,"name":"noname","condition":"","momentOfChecking":MomentBegin,"constantName":""} def __init__(self, gameFile, id): GameResource.__init__(self, gameFile, id) def ReadGmk(self, stream): triggerStream = stream.Deserialize() if not triggerStream.ReadBoolean(): self.exists = False return triggerStream.ReadDword() self.setMember("name", triggerStream.ReadString()) self.setMember("condition", triggerStream.ReadString()) self.setMember("momentOfChecking", triggerStream.ReadDword()) self.setMember("constantName", triggerStream.ReadString())
gpl-3.0
lmorchard/django-allauth
allauth/socialaccount/south_migrations/0011_auto__chg_field_socialtoken_token.py
77
6468
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'SocialToken.token' db.alter_column('socialaccount_socialtoken', 'token', self.gf('django.db.models.fields.TextField')()) def backwards(self, orm): # Changing field 'SocialToken.token' db.alter_column('socialaccount_socialtoken', 'token', self.gf('django.db.models.fields.CharField')(max_length=255)) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sites.site': { 'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 
'socialaccount.socialaccount': { 'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'socialaccount.socialapp': { 'Meta': {'object_name': 'SocialApp'}, 'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}) }, 'socialaccount.socialtoken': { 'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'}, 'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}), 'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'token': ('django.db.models.fields.TextField', [], {}), 'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}) } } complete_apps = ['socialaccount']
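For reference, the equivalent schema change written as a modern (post-South) Django migration; the model and field names follow the migration above, while the dependency name is a placeholder:

# Illustrative modern-Django equivalent: widen SocialToken.token
# from CharField(max_length=255) to TextField.
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("socialaccount", "0010_previous_migration"),  # placeholder dependency
    ]

    operations = [
        migrations.AlterField(
            model_name="socialtoken",
            name="token",
            field=models.TextField(),
        ),
    ]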
mit
partofthething/home-assistant
tests/components/ruckus_unleashed/test_init.py
3
3616
"""Test the Ruckus Unleashed config flow.""" from unittest.mock import patch from pyruckus.exceptions import AuthenticationError from homeassistant.components.ruckus_unleashed import ( API_AP, API_DEVICE_NAME, API_ID, API_MAC, API_MODEL, API_SYSTEM_OVERVIEW, API_VERSION, DOMAIN, MANUFACTURER, ) from homeassistant.config_entries import ( ENTRY_STATE_LOADED, ENTRY_STATE_NOT_LOADED, ENTRY_STATE_SETUP_RETRY, ) from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC from tests.components.ruckus_unleashed import ( DEFAULT_AP_INFO, DEFAULT_SYSTEM_INFO, DEFAULT_TITLE, init_integration, mock_config_entry, ) async def test_setup_entry_login_error(hass): """Test entry setup failed due to login error.""" entry = mock_config_entry() with patch( "homeassistant.components.ruckus_unleashed.Ruckus", side_effect=AuthenticationError, ): entry.add_to_hass(hass) result = await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() assert result is False async def test_setup_entry_connection_error(hass): """Test entry setup failed due to connection error.""" entry = mock_config_entry() with patch( "homeassistant.components.ruckus_unleashed.Ruckus", side_effect=ConnectionError, ): entry.add_to_hass(hass) await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() assert entry.state == ENTRY_STATE_SETUP_RETRY async def test_router_device_setup(hass): """Test a router device is created.""" await init_integration(hass) device_info = DEFAULT_AP_INFO[API_AP][API_ID]["1"] device_registry = await hass.helpers.device_registry.async_get_registry() device = device_registry.async_get_device( identifiers={(CONNECTION_NETWORK_MAC, device_info[API_MAC])}, connections={(CONNECTION_NETWORK_MAC, device_info[API_MAC])}, ) assert device assert device.manufacturer == MANUFACTURER assert device.model == device_info[API_MODEL] assert device.name == device_info[API_DEVICE_NAME] assert device.sw_version == DEFAULT_SYSTEM_INFO[API_SYSTEM_OVERVIEW][API_VERSION] assert device.via_device_id is None async def test_unload_entry(hass): """Test successful unload of entry.""" entry = await init_integration(hass) assert len(hass.config_entries.async_entries(DOMAIN)) == 1 assert entry.state == ENTRY_STATE_LOADED assert await hass.config_entries.async_unload(entry.entry_id) await hass.async_block_till_done() assert entry.state == ENTRY_STATE_NOT_LOADED assert not hass.data.get(DOMAIN) async def test_config_not_ready_during_setup(hass): """Test we throw a ConfigNotReady if Coordinator update fails.""" entry = mock_config_entry() with patch( "homeassistant.components.ruckus_unleashed.Ruckus.connect", return_value=None, ), patch( "homeassistant.components.ruckus_unleashed.Ruckus.mesh_name", return_value=DEFAULT_TITLE, ), patch( "homeassistant.components.ruckus_unleashed.Ruckus.system_info", return_value=DEFAULT_SYSTEM_INFO, ), patch( "homeassistant.components.ruckus_unleashed.RuckusUnleashedDataUpdateCoordinator._async_update_data", side_effect=ConnectionError, ): entry.add_to_hass(hass) await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() assert entry.state == ENTRY_STATE_SETUP_RETRY
mit
wzbozon/statsmodels
statsmodels/tools/print_version.py
23
7951
#!/usr/bin/env python from __future__ import print_function from statsmodels.compat.python import reduce import sys from os.path import dirname def safe_version(module, attr='__version__'): if not isinstance(attr, list): attr = [attr] try: return reduce(getattr, [module] + attr) except AttributeError: return "Cannot detect version" def _show_versions_only(): print("\nINSTALLED VERSIONS") print("------------------") print("Python: %d.%d.%d.%s.%s" % sys.version_info[:]) try: import os (sysname, nodename, release, version, machine) = os.uname() print("OS: %s %s %s %s" % (sysname, release, version, machine)) print("byteorder: %s" % sys.byteorder) print("LC_ALL: %s" % os.environ.get('LC_ALL', "None")) print("LANG: %s" % os.environ.get('LANG', "None")) except: pass try: from statsmodels import version has_sm = True except ImportError: has_sm = False print('\nStatsmodels\n===========\n') if has_sm: print('Installed: %s' % safe_version(version, 'full_version')) else: print('Not installed') print("\nRequired Dependencies\n=====================\n") try: import Cython print("cython: %s" % safe_version(Cython)) except ImportError: print("cython: Not installed") try: import numpy print("numpy: %s" % safe_version(numpy, ['version', 'version'])) except ImportError: print("numpy: Not installed") try: import scipy print("scipy: %s" % safe_version(scipy, ['version', 'version'])) except ImportError: print("scipy: Not installed") try: import pandas print("pandas: %s" % safe_version(pandas, ['version', 'version'])) except ImportError: print("pandas: Not installed") try: import dateutil print(" dateutil: %s" % safe_version(dateutil)) except ImportError: print(" dateutil: not installed") try: import patsy print("patsy: %s" % safe_version(patsy)) except ImportError: print("patsy: Not installed") print("\nOptional Dependencies\n=====================\n") try: import matplotlib as mpl print("matplotlib: %s" % safe_version(mpl)) except ImportError: print("matplotlib: Not installed") try: from cvxopt import info print("cvxopt: %s" % safe_version(info, 'version')) except ImportError: print("cvxopt: Not installed") print("\nDeveloper Tools\n================\n") try: import IPython print("IPython: %s" % safe_version(IPython)) except ImportError: print("IPython: Not installed") try: import jinja2 print(" jinja2: %s" % safe_version(jinja2)) except ImportError: print(" jinja2: Not installed") try: import sphinx print("sphinx: %s" % safe_version(sphinx)) except ImportError: print("sphinx: Not installed") try: import pygments print(" pygments: %s" % safe_version(pygments)) except ImportError: print(" pygments: Not installed") try: import nose print("nose: %s" % safe_version(nose)) except ImportError: print("nose: Not installed") try: import virtualenv print("virtualenv: %s" % safe_version(virtualenv)) except ImportError: print("virtualenv: Not installed") print("\n") def show_versions(show_dirs=True): if not show_dirs: _show_versions_only() print("\nINSTALLED VERSIONS") print("------------------") print("Python: %d.%d.%d.%s.%s" % sys.version_info[:]) try: import os (sysname, nodename, release, version, machine) = os.uname() print("OS: %s %s %s %s" % (sysname, release, version, machine)) print("byteorder: %s" % sys.byteorder) print("LC_ALL: %s" % os.environ.get('LC_ALL', "None")) print("LANG: %s" % os.environ.get('LANG', "None")) except: pass try: import statsmodels from statsmodels import version has_sm = True except ImportError: has_sm = False print('\nStatsmodels\n===========\n') if has_sm: print('Installed: %s (%s)' % 
(safe_version(version, 'full_version'), dirname(statsmodels.__file__))) else: print('Not installed') print("\nRequired Dependencies\n=====================\n") try: import Cython print("cython: %s (%s)" % (safe_version(Cython), dirname(Cython.__file__))) except ImportError: print("cython: Not installed") try: import numpy print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']), dirname(numpy.__file__))) except ImportError: print("numpy: Not installed") try: import scipy print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']), dirname(scipy.__file__))) except ImportError: print("scipy: Not installed") try: import pandas print("pandas: %s (%s)" % (safe_version(pandas, ['version', 'version']), dirname(pandas.__file__))) except ImportError: print("pandas: Not installed") try: import dateutil print(" dateutil: %s (%s)" % (safe_version(dateutil), dirname(dateutil.__file__))) except ImportError: print(" dateutil: not installed") try: import patsy print("patsy: %s (%s)" % (safe_version(patsy), dirname(patsy.__file__))) except ImportError: print("patsy: Not installed") print("\nOptional Dependencies\n=====================\n") try: import matplotlib as mpl print("matplotlib: %s (%s)" % (safe_version(mpl), dirname(mpl.__file__))) except ImportError: print("matplotlib: Not installed") try: from cvxopt import info print("cvxopt: %s (%s)" % (safe_version(info, 'version'), dirname(info.__file__))) except ImportError: print("cvxopt: Not installed") print("\nDeveloper Tools\n================\n") try: import IPython print("IPython: %s (%s)" % (safe_version(IPython), dirname(IPython.__file__))) except ImportError: print("IPython: Not installed") try: import jinja2 print(" jinja2: %s (%s)" % (safe_version(jinja2), dirname(jinja2.__file__))) except ImportError: print(" jinja2: Not installed") try: import sphinx print("sphinx: %s (%s)" % (safe_version(sphinx), dirname(sphinx.__file__))) except ImportError: print("sphinx: Not installed") try: import pygments print(" pygments: %s (%s)" % (safe_version(pygments), dirname(pygments.__file__))) except ImportError: print(" pygments: Not installed") try: import nose print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__))) except ImportError: print("nose: Not installed") try: import virtualenv print("virtualenv: %s (%s)" % (safe_version(virtualenv), dirname(virtualenv.__file__))) except ImportError: print("virtualenv: Not installed") print("\n") if __name__ == "__main__": show_versions()
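A quick sketch of how the safe_version() helper above behaves; numpy is only an example target, any module works:

# safe_version walks an attribute path and never raises on missing attributes.
import types
import numpy

print(safe_version(numpy))                           # numpy.__version__
print(safe_version(numpy, ['version', 'version']))   # numpy.version.version
empty_module = types.ModuleType("dummy")
print(safe_version(empty_module))                    # 'Cannot detect version'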
bsd-3-clause
cjhak/b2share
invenio/modules/uploader/uploader_tasks.py
13
13474
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ Uploader workflow tasks. Those are the main/common tasks that the uploader will use, they are used inside the workflows defined in :py:mod:`~invenio.modules.uploader.workflows`. See: `Simple workflows for Python <https://pypi.python.org/pypi/workflow/1.0>`_ """ import os from invenio.base.globals import cfg from invenio.modules.pidstore.models import PersistentIdentifier from .errors import UploaderWorkflowException ########################################################### ############## Pre tasks ################# ########################################################### def create_records_for_workflow(records, **kwargs): """Create the record object from the json. :param records: List of records to be process. :kwargs: """ from invenio.modules.records.api import Record for i, obj in enumerate(records): records[i] = (obj[0], Record(json=obj[1])) ########################################################### ############## Post tasks ################# ########################################################### def return_recordids_only(records, **kwargs): """Retrieve from the records only the record ID to return them. :param records: Processed list of records :parma kwargs: """ for i, obj in enumerate(records): records[i] = obj[1].get('recid') ########################################################### ############## Workflow tasks ################# ########################################################### def raise_(ex): """Helper task to raise an exception.""" def _raise_(obj, eng): raise ex return _raise_ def validate(step): """Validate the record. Validate the record using the `validate` method present in each record and the validation mode, either from the command line options or from `UPLOADER_VALIDATION_MODE`. For the validation the `schema` information from the field definition is used, see `invenio.modules.jsonalchemy.jsonext.parsers.schema_parser`. """ def _validate(obj, eng): record = obj[1] mode = eng.getVar('options', {}).get('validation_mode', cfg['UPLOADER_VALIDATION_MODE']) eng.log.info("Validating record using mode: '%s'", (mode, )) if not hasattr(record, 'validate'): raise UploaderWorkflowException( step, msg="An 'validate' method is needed") validator_errors = record.validate() eng.log.info('Validation errors: %s' % (str(validator_errors), )) if mode.lower() == 'strict' and validator_errors: raise UploaderWorkflowException( step, msg="One or more validation errors have occurred, please" " check them or change the 'validation_mode' to " "'permissive'.\n%s" % (str(validator_errors), )) eng.log.info('Finish validating the current record') return _validate def retrieve_record_id_from_pids(step): """Retrieve the record identifier from a record using its PIDS. 
If any PID matches with any in the DB then the record id found is set to the current `record` """ def _retrieve_record_id_from_pids(obj, eng): record = obj[1] eng.log.info('Look for PIDs inside the current record') if not hasattr(record, 'persistent_identifiers'): raise UploaderWorkflowException( step, msg="An 'persistent_identifiers' method is needed") for pid_name, pid_values in record.persistent_identifiers: eng.log.info("Found PID '%s' trying to match it", (pid_name, )) matching_recids = set() for possible_pid in pid_values: eng.log.info("Looking for PID %s", (possible_pid, )) pid = PersistentIdentifier.get( possible_pid.get('type'), possible_pid.get('value'), possible_pid.get('provider')) if pid: eng.log.info("PID found in the data base %s", (pid.object_value, )) matching_recids.add(pid.object_value) if len(matching_recids) > 1: raise UploaderWorkflowException( step, msg="Found multiple match in the database, %s " "for '%s'" % (repr(matching_recids), pid_name)) elif matching_recids: record['recid'] = matching_recids.pop() eng.log.info( 'Finish looking for PIDs inside the current record') break eng.log.info('Finish looking for PIDs inside the current record') return _retrieve_record_id_from_pids def reserve_record_id(step): """Reserve a new record id for the current object and set it inside.""" # TODO: manage exceptions in a better way def _reserve_record_id(obj, eng): record = obj[1] eng.log.info('Reserve a recid for the new record') try: pid = PersistentIdentifier.create('recid', pid_value=None, pid_provider='invenio') record['recid'] = int(pid.pid_value) pid.reserve() eng.log.info("Finish reserving a recid '%s' for the new record", (pid.pid_value, )) except Exception as e: raise UploaderWorkflowException(step, e.message) return _reserve_record_id def save_record(step): """Save the record to the DB using the `_save` method from it.""" def _save(obj, eng): record = obj[1] eng.log.info('Saving record to DB') if not hasattr(record, '_save'): raise UploaderWorkflowException( step, msg="An '_save' method is needed") try: record._save() eng.log.info('Record saved to DB') except Exception as e: raise UploaderWorkflowException(step, e.message) return _save def save_master_format(step): """Put the master format info the `bfmt` DB table.""" def _save_master_format(obj, eng): from invenio.base.helpers import utf8ifier from invenio.modules.formatter.models import Bibfmt from invenio.ext.sqlalchemy import db from zlib import compress eng.log.info('Saving master record to DB') bibfmt = Bibfmt(id_bibrec=obj[1]['recid'], format=obj[1].additional_info.master_format, kind='master', last_updated=obj[1]['modification_date'], value=compress(utf8ifier( obj[0] if obj[1].additional_info.master_format == 'marc' else obj[1].legacy_export_as_marc() ))) db.session.add(bibfmt) db.session.commit() eng.log.info('Master record saved to DB') return _save_master_format def update_pidstore(step): """Save each PID present in the record to the PID storage.""" # TODO: manage exceptions def _update_pidstore(obj, eng): record = obj[1] eng.log.info('Look for PIDs inside the current record and register ' 'them in the DB') if not hasattr(record, 'persistent_identifiers'): raise UploaderWorkflowException( step, msg="An 'persistent_identifiers' method is needed") eng.log.info("Found PIDs '%s'", (record.persistent_identifiers, )) for pid_name, pid_values in record.persistent_identifiers: eng.log.info("Found PID '%s'", (pid_name, )) for pid_value in pid_values: pid = PersistentIdentifier.get( pid_value.get('type'), 
pid_value.get('value'), pid_value.get('provider')) if pid is None: pid = PersistentIdentifier.create( pid_value.get('type'), pid_value.get('value'), pid_value.get('provider')) if not pid.has_object('rec', record['recid']): pid.assign('rec', record['recid']) eng.log.info('Finish looking for PIDs inside the current record and ' 'register them in the DB') return _update_pidstore def manage_attached_documents(step): """Attach and treat all the documents embeded in the input filex.""" from invenio.modules.documents import api from invenio.modules.documents.tasks import set_document_contents from invenio.modules.records.utils import name_generator def _manage_attached_documents(obj, eng): record = obj[1] filename = eng.getVar('options').get('filename') dirname = os.path.abspath(os.path.dirname(filename)) \ if filename is not None else os.curdir def _check_path(source): """Check if the ``source`` path. If it is relative path than the directory path of original blob filename, if defined, or the current directory will be prepended. """ if not os.path.isabs(source): new_source = os.path.join(dirname, source) if os.path.exists(new_source): return new_source eng.log.error('File %s does not exist.', (new_source,)) return source eng.log.info('Look documents to manage') def _create_document(metadata, record): metadata['source'] = _check_path(metadata['source']) if '_documents' not in record: record['_documents'] = [] model = metadata.pop('model', 'record_document_base') if 'recids' not in metadata: metadata['recids'] = list() if record.get('recid', -1) not in metadata['recids']: metadata['recids'].append(record.get('recid', -1), ) document = api.Document.create(metadata, model=model) eng.log.info('Document %s created', (document['_id'],)) record['_documents'].append((document['title'], document['_id'])) return document if 'files_to_upload' in record: eng.log.info('Documents to upload found') files_to_upload = record.get('files_to_upload', []) for file_to_upload in files_to_upload: document = _create_document(file_to_upload, record) set_document_contents.delay( document['_id'], document['source'], name_generator(document) ) eng.log.info('Finish creating documents, delete temporary key') del record['files_to_upload'] if 'files_to_link' in record: eng.log.info('Documents to link found') files_to_link = record.get('files_to_link', []) for file_to_link in files_to_link: _create_document(file_to_link, record) eng.log.info('Finish linking documents, delete temporary key') del record['files_to_link'] return _manage_attached_documents def legacy(step): """Update legacy bibxxx tables.""" def _legacy(obj, eng): record = obj[1] if record.additional_info.master_format != 'marc': return import marshal from invenio.legacy.bibupload.engine import ( CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS, CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, archive_marcxml_for_history, update_bibfmt_format, update_database_with_metadata, ) modification_date = record['modification_date'].strftime( '%Y-%m-%d %H:%M:%S') update_bibfmt_format( record['recid'], record.legacy_export_as_marc(), 'xm', modification_date ) if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE: update_bibfmt_format( record['recid'], marshal.dumps(record.legacy_create_recstruct()), 'recstruct', modification_date ) if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS: archive_marcxml_for_history( record['recid'], affected_fields={} ) update_database_with_metadata( record.legacy_create_recstruct(), record['recid'] ) eng.log.info( 'Finishing legacy task for record {0}'.format(record['recid']) ) 
return _legacy
gpl-2.0
PongPi/isl-odoo
openerp/addons/base/__openerp__.py
336
3703
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Base', 'version': '1.3', 'category': 'Hidden', 'description': """ The kernel of OpenERP, needed for all installation. =================================================== """, 'author': 'OpenERP SA', 'maintainer': 'OpenERP SA', 'website': 'http://www.openerp.com', 'depends': [], 'data': [ 'base_data.xml', 'res/res_currency_data.xml', 'res/res_country_data.xml', 'security/base_security.xml', 'base_menu.xml', 'res/res_config.xml', 'res/res.country.state.csv', 'ir/ir_actions.xml', 'ir/ir_config_parameter_view.xml', 'ir/ir_cron_view.xml', 'ir/ir_filters.xml', 'ir/ir_mail_server_view.xml', 'ir/ir_model_view.xml', 'ir/ir_attachment_view.xml', 'ir/ir_rule_view.xml', 'ir/ir_sequence_view.xml', 'ir/ir_translation_view.xml', 'ir/ir_ui_menu_view.xml', 'ir/ir_ui_view_view.xml', 'ir/ir_values_view.xml', 'ir/osv_memory_autovacuum.xml', 'ir/ir_model_report.xml', 'ir/ir_logging_view.xml', 'ir/ir_qweb.xml', 'workflow/workflow_view.xml', 'module/module_view.xml', 'module/module_data.xml', 'module/module_report.xml', 'module/wizard/base_module_update_view.xml', 'module/wizard/base_language_install_view.xml', 'module/wizard/base_import_language_view.xml', 'module/wizard/base_module_upgrade_view.xml', 'module/wizard/base_module_configuration_view.xml', 'module/wizard/base_export_language_view.xml', 'module/wizard/base_update_translations_view.xml', 'module/wizard/base_module_immediate_install.xml', 'res/res_company_view.xml', 'res/res_request_view.xml', 'res/res_lang_view.xml', 'res/res_partner_report.xml', 'res/res_partner_view.xml', 'res/res_bank_view.xml', 'res/res_country_view.xml', 'res/res_currency_view.xml', 'res/res_users_view.xml', 'res/res_partner_data.xml', 'res/ir_property_view.xml', 'res/res_security.xml', 'security/ir.model.access.csv', ], 'demo': [ 'base_demo.xml', 'res/res_partner_demo.xml', 'res/res_partner_demo.yml', 'res/res_partner_image_demo.xml', ], 'test': [ 'tests/base_test.yml', 'tests/test_osv_expression.yml', 'tests/test_ir_rule.yml', # <-- These tests modify/add/delete ir_rules. ], 'installable': True, 'auto_install': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mafiya69/sympy
sympy/vector/tests/test_dyadic.py
94
4076
from sympy import sin, cos, symbols, pi, ImmutableMatrix as Matrix, \ simplify from sympy.vector import (CoordSysCartesian, Vector, Dyadic, DyadicAdd, DyadicMul, DyadicZero, BaseDyadic, express) A = CoordSysCartesian('A') def test_dyadic(): a, b = symbols('a, b') assert Dyadic.zero != 0 assert isinstance(Dyadic.zero, DyadicZero) assert BaseDyadic(A.i, A.j) != BaseDyadic(A.j, A.i) assert (BaseDyadic(Vector.zero, A.i) == BaseDyadic(A.i, Vector.zero) == Dyadic.zero) d1 = A.i | A.i d2 = A.j | A.j d3 = A.i | A.j assert isinstance(d1, BaseDyadic) d_mul = a*d1 assert isinstance(d_mul, DyadicMul) assert d_mul.base_dyadic == d1 assert d_mul.measure_number == a assert isinstance(a*d1 + b*d3, DyadicAdd) assert d1 == A.i.outer(A.i) assert d3 == A.i.outer(A.j) v1 = a*A.i - A.k v2 = A.i + b*A.j assert v1 | v2 == v1.outer(v2) == a * (A.i|A.i) + (a*b) * (A.i|A.j) +\ - (A.k|A.i) - b * (A.k|A.j) assert d1 * 0 == Dyadic.zero assert d1 != Dyadic.zero assert d1 * 2 == 2 * (A.i | A.i) assert d1 / 2. == 0.5 * d1 assert d1.dot(0 * d1) == Vector.zero assert d1 & d2 == Dyadic.zero assert d1.dot(A.i) == A.i == d1 & A.i assert d1.cross(Vector.zero) == Dyadic.zero assert d1.cross(A.i) == Dyadic.zero assert d1 ^ A.j == d1.cross(A.j) assert d1.cross(A.k) == - A.i | A.j assert d2.cross(A.i) == - A.j | A.k == d2 ^ A.i assert A.i ^ d1 == Dyadic.zero assert A.j.cross(d1) == - A.k | A.i == A.j ^ d1 assert Vector.zero.cross(d1) == Dyadic.zero assert A.k ^ d1 == A.j | A.i assert A.i.dot(d1) == A.i & d1 == A.i assert A.j.dot(d1) == Vector.zero assert Vector.zero.dot(d1) == Vector.zero assert A.j & d2 == A.j assert d1.dot(d3) == d1 & d3 == A.i | A.j == d3 assert d3 & d1 == Dyadic.zero q = symbols('q') B = A.orient_new_axis('B', q, A.k) assert express(d1, B) == express(d1, B, B) assert express(d1, B) == ((cos(q)**2) * (B.i | B.i) + (-sin(q) * cos(q)) * (B.i | B.j) + (-sin(q) * cos(q)) * (B.j | B.i) + (sin(q)**2) * (B.j | B.j)) assert express(d1, B, A) == (cos(q)) * (B.i | A.i) + (-sin(q)) * (B.j | A.i) assert express(d1, A, B) == (cos(q)) * (A.i | B.i) + (-sin(q)) * (A.i | B.j) assert d1.to_matrix(A) == Matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) assert d1.to_matrix(A, B) == Matrix([[cos(q), -sin(q), 0], [0, 0, 0], [0, 0, 0]]) assert d3.to_matrix(A) == Matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]]) a, b, c, d, e, f = symbols('a, b, c, d, e, f') v1 = a * A.i + b * A.j + c * A.k v2 = d * A.i + e * A.j + f * A.k d4 = v1.outer(v2) assert d4.to_matrix(A) == Matrix([[a * d, a * e, a * f], [b * d, b * e, b * f], [c * d, c * e, c * f]]) d5 = v1.outer(v1) C = A.orient_new_axis('C', q, A.i) for expected, actual in zip(C.rotation_matrix(A) * d5.to_matrix(A) * \ C.rotation_matrix(A).T, d5.to_matrix(C)): assert (expected - actual).simplify() == 0 def test_dyadic_simplify(): x, y, z, k, n, m, w, f, s, A = symbols('x, y, z, k, n, m, w, f, s, A') N = CoordSysCartesian('N') dy = N.i | N.i test1 = (1 / x + 1 / y) * dy assert (N.i & test1 & N.i) != (x + y) / (x * y) test1 = test1.simplify() assert test1.simplify() == simplify(test1) assert (N.i & test1 & N.i) == (x + y) / (x * y) test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * dy test2 = test2.simplify() assert (N.i & test2 & N.i) == (A**2 * s**4 / (4 * pi * k * m**3)) test3 = ((4 + 4 * x - 2 * (2 + 2 * x)) / (2 + 2 * x)) * dy test3 = test3.simplify() assert (N.i & test3 & N.i) == 0 test4 = ((-4 * x * y**2 - 2 * y**3 - 2 * x**2 * y) / (x + y)**2) * dy test4 = test4.simplify() assert (N.i & test4 & N.i) == -2 * y
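A few interactive lines showing the dyadic operations the tests above exercise; this follows the CoordSysCartesian API used in the test file (newer SymPy releases rename it to CoordSys3D):

# Small sketch of outer products (dyadics) with sympy.vector.
from sympy.vector import CoordSysCartesian

N = CoordSysCartesian('N')
d = N.i | N.j                 # outer product, same as N.i.outer(N.j)
print(d.dot(N.j))             # contracts to N.i
print(d.cross(N.k))           # (N.i|N.i), since j x k = i
print(d.to_matrix(N))         # Matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]])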
bsd-3-clause
mkheirkhah/mptcp
src/config-store/bindings/modulegen__gcc_ILP32.py
36
68421
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.config_store', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## file-config.h (module 'config-store'): ns3::FileConfig [class] module.add_class('FileConfig', allow_subclassing=True) ## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class] module.add_class('GtkConfigStore') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## file-config.h (module 'config-store'): ns3::NoneFileConfig [class] module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig']) ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## config-store.h (module 'config-store'): ns3::ConfigStore [class] module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase']) ## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration] module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore']) ## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration] module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], 
memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## 
callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_methods(root_module): register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig']) register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) 
register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): 
ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3FileConfig_methods(root_module, cls): ## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor] cls.add_constructor([]) ## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor] cls.add_constructor([param('ns3::FileConfig const &', 'arg0')]) ## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function] cls.add_method('Attributes', 'void', [], is_pure_virtual=True, is_virtual=True) ## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function] cls.add_method('Default', 'void', [], is_pure_virtual=True, is_virtual=True) ## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function] cls.add_method('Global', 'void', [], is_pure_virtual=True, is_virtual=True) ## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function] cls.add_method('SetFilename', 'void', [param('std::string', 'filename')], is_pure_virtual=True, is_virtual=True) return def register_Ns3GtkConfigStore_methods(root_module, cls): ## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore(ns3::GtkConfigStore const & arg0) [copy constructor] cls.add_constructor([param('ns3::GtkConfigStore const &', 'arg0')]) ## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore() [constructor] cls.add_constructor([]) ## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureAttributes() [member function] cls.add_method('ConfigureAttributes', 'void', []) ## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureDefaults() [member function] cls.add_method('ConfigureDefaults', 'void', []) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', 
[param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3NoneFileConfig_methods(root_module, cls): ## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor] cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')]) ## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor] cls.add_constructor([]) ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function] cls.add_method('Attributes', 'void', [], is_virtual=True) ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function] cls.add_method('Default', 'void', [], is_virtual=True) ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function] cls.add_method('Global', 'void', [], is_virtual=True) ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function] cls.add_method('SetFilename', 'void', [param('std::string', 'filename')], is_virtual=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) 
[member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor 
const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId 
ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def 
register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3ConfigStore_methods(root_module, cls): ## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor] cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')]) ## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor] cls.add_constructor([]) ## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function] cls.add_method('ConfigureAttributes', 'void', []) ## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function] cls.add_method('ConfigureDefaults', 'void', []) ## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function] cls.add_method('SetFileFormat', 'void', [param('ns3::ConfigStore::FileFormat', 'format')]) ## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function] cls.add_method('SetFilename', 'void', [param('std::string', 'filename')]) ## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function] cls.add_method('SetMode', 'void', [param('ns3::ConfigStore::Mode', 'mode')]) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] 
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h 
(module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## 
trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, 
is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool 
ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 
'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def 
register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
cisc474/board_game_app
api/fetch_desc_bs4.py
2
3669
# importing the libraries I want: an sqlite interface, a url requester, and an XML parsing library
import sqlite3, urllib2, bs4 as BeautifulSoup
# sudo apt-get install python-bs4
# That's how I was able to install the latest Beautiful Soup

# this bit I have to google search every time.
con = sqlite3.connect('bgg.sqlite')
cur = con.cursor()

# this gets an array of responses, not in the best format; I made all of this by
# FIRST going into a python session then playing with the results until I liked it
cur.execute('select objectid as id from games')
data = cur.fetchall()
# con.commit will make sure the database is saved and the next "transaction" can begin
con.commit()

# Note the "create table if not exists" part, that's so I can run this script over and over
# even after I've had to debug
cur.execute('create table if not exists extra (objectid integer primary key, description text, thumbnail text, image text, categories text)')
con.commit()

# this is a simple way of cleaning up the data that came out of my earlier query
# I use this lambda trick often in python, it's a simple one-off function which
# "returns" the part after the : on each element in data
gameids = map(lambda x: x[0], data)


# a simple Python class just to be classy
class DescRow:
    def create_sql(self):
        return ["insert or replace into extra (objectid, description, thumbnail, image, categories) values (?, ?, ?, ?, ?)",
                (self.objectid, self.description, self.thumbnail, self.image, self.categories)]


# another simple procedure to hit the BGG API in the right way
def url_gen(gameid):
    return "http://www.boardgamegeek.com/xmlapi2/thing?stats=1&id=%s" % gameid


# one main game lookup
def fetch_game_data(gameid):
    # this gets the server response from the BGG API
    response = urllib2.urlopen(url_gen(gameid))
    # this saves the text response in one long string
    xml = response.read()
    # this creates a beautifulsoup tree out of the xml
    # I used to do all of this by hand using regular expressions
    # Now I dig using beautifulsoup to parse my webpages and xml responses
    bs_tree = BeautifulSoup.BeautifulSoup(xml)
    game_data = DescRow()
    game_data.objectid = gameid
    # This is where I really needed a "by-hand" example to get it right but
    # this will go to the first "description" tag and return the contents as a string
    game_data.description = bs_tree.find('description').text
    # ditto for "thumbnail" and "image" after that
    game_data.thumbnail = bs_tree.find('thumbnail').text
    game_data.image = bs_tree.find('image').text
    # this is my way of making a category data set for the DB
    # if someone is in a particular mood, party games or card games or whatever
    # this might do the job
    game_data.categories = " @@ ".join(map(lambda x: x.attrs['value'],
                                            bs_tree.find_all('link', attrs={"type": "boardgamecategory"})))
    return game_data


# this procedure does one row creation transaction
def create_row(gameid, cursor, con):
    game_data = fetch_game_data(gameid)
    sql_query = game_data.create_sql()
    cursor.execute(sql_query[0], sql_query[1])
    con.commit()
    return game_data


import time

# I used this while debugging, I'll leave it here to show that I am very human.
errors = []

# This is the main program, it goes through each gameid and creates a row
# I put a 1 second pause between each command because I was having the API
# cut me off or get backed up, this worked for making sure the API was friendly to me
for gameid in gameids:
    gdata = create_row(gameid, cur, con)
    time.sleep(1)
    # this is just so I know that things were working
    print gdata.objectid, gdata.categories
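# Illustrative aside, not from the original script: the categories column above
# packs each game's categories into one " @@ "-delimited string, so a consumer
# has to split it back apart. The "Party Game" filter is a made-up example.
import sqlite3
demo_con = sqlite3.connect('bgg.sqlite')
demo_cur = demo_con.cursor()
demo_cur.execute('select objectid, categories from extra')
for objectid, categories in demo_cur.fetchall():
    # undo the " @@ " join to recover the individual category names
    category_list = categories.split(' @@ ') if categories else []
    if 'Party Game' in category_list:
        print objectid, category_list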
mit
dfm/arxiv2speech
arxiv2speech.py
1
3104
#!/usr/bin/env python from __future__ import print_function, absolute_import, unicode_literals __all__ = ["run"] __version__ = "0.0.4" __author__ = "Dan Foreman-Mackey ([email protected])" __copyright__ = "Copyright 2013 Daniel Foreman-Mackey" __contributors__ = [] import os import re import json import shutil import subprocess from multiprocessing import Pool import feedparser from html2text import html2text # Regular expressions. id_re = re.compile(r"http://arxiv.org/abs/(.*)") title_re = re.compile(r"(.*) \(arXiv(?:.*?)\)$") author_re = re.compile(r"<a href=\"(?:.*?)\">(.*?)</a>") def run(basedir, url="http://export.arxiv.org/rss/astro-ph", clobber=False, quiet=False, limit=None): # Make the base directory. try: os.makedirs(basedir) except: if not clobber: raise shutil.rmtree(basedir) os.makedirs(basedir) # Fetch the abstracts. if not quiet: print("Fetching recent abstracts from: {0}".format(url)) abstracts = get_recent(url) if not quiet: print(" ... Found {0} abstracts.".format(len(abstracts))) if limit is not None: print("Limiting to {0} total.".format(limit)) abstracts = abstracts[:int(limit)] if not quiet: print("Saving audio files (slowly) in: {0}".format(basedir)) p = Pool() p.map(_run_one, zip([basedir] * len(abstracts), abstracts)) if not quiet: print(" ... Done.") def _run_one(args): basedir, abstract = args # Create the directory for the audio files. basedir = os.path.join(basedir, abstract["id"]) os.makedirs(basedir) # Save the metadata. json.dump(abstract, open(os.path.join(basedir, "info.json"), "w"), sort_keys=True, indent=4, separators=(",", ": ")) # Save the audio files. by = "\n\nBy: " + abstract["authors"][0] l = len(abstract["authors"]) if l == 2: by += " and " + abstract["authors"][1] elif l > 2: by += " and {0} others.".format(l - 1) r = text2audio(abstract["title"] + by, os.path.join(basedir, "brief.m4a")) assert r == 0, "Couldn't save brief for: {0}".format(abstract["id"]) r = text2audio(", ".join(abstract["authors"]), os.path.join(basedir, "authors.m4a")) assert r == 0, "Couldn't save authors for: {0}".format(abstract["id"]) r = text2audio(abstract["abstract"], os.path.join(basedir, "abstract.m4a")) assert r == 0, "Couldn't save abstract for: {0}".format(abstract["id"]) def get_recent(rss_url): d = feedparser.parse(rss_url) results = [] for e in d.entries: results.append({ "id": id_re.findall(e.id)[0], "title": title_re.findall(e.title)[0], "authors": author_re.findall(e.author), "abstract": html2text(e.summary), }) return results def text2audio(text, filename): p = subprocess.Popen(["say", "-o", filename], stdin=subprocess.PIPE) p.communicate(text) code = p.wait() return code
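# Illustrative aside, not from the original module: a quick demonstration of the
# regular expressions defined at the top of this file. The sample strings are
# invented, not taken from a real arXiv feed.
if __name__ == "__main__":
    print(id_re.findall("http://arxiv.org/abs/1234.5678")[0])
    # -> 1234.5678
    print(title_re.findall("A Toy Paper Title (arXiv:1234.5678v1 [astro-ph.GA])")[0])
    # -> A Toy Paper Title
    print(author_re.findall('<a href="http://example.org/a">A. Author</a> and <a href="http://example.org/b">B. Author</a>'))
    # -> ['A. Author', 'B. Author']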
bsd-2-clause
valentin-krasontovitsch/ansible
test/units/parsing/test_metadata.py
125
10000
# coding: utf-8 # (c) 2017, Toshio Kuratomi <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import ast import pytest from ansible.parsing import metadata as md LICENSE = b"""# some license text boilerplate # That we have at the top of files """ FUTURE_IMPORTS = b""" from __future__ import (absolute_import, division, print_function) """ REGULAR_IMPORTS = b""" import test from foo import bar """ STANDARD_METADATA = b""" ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} """ TEXT_STD_METADATA = b""" ANSIBLE_METADATA = u''' metadata_version: '1.1' status: - 'stableinterface' supported_by: 'core' ''' """ BYTES_STD_METADATA = b""" ANSIBLE_METADATA = b''' metadata_version: '1.1' status: - 'stableinterface' supported_by: 'core' ''' """ TRAILING_COMMENT_METADATA = b""" ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} # { Testing } """ MULTIPLE_STATEMENTS_METADATA = b""" DOCUMENTATION = "" ; ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} ; RETURNS = "" """ EMBEDDED_COMMENT_METADATA = b""" ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], # { Testing } 'supported_by': 'core'} """ HASH_SYMBOL_METADATA = b""" ANSIBLE_METADATA = {'metadata_version': '1.1 # 4', 'status': ['stableinterface'], 'supported_by': 'core # Testing '} """ HASH_SYMBOL_METADATA = b""" ANSIBLE_METADATA = {'metadata_version': '1.1 # 4', 'status': ['stableinterface'], 'supported_by': 'core # Testing '} """ HASH_COMBO_METADATA = b""" ANSIBLE_METADATA = {'metadata_version': '1.1 # 4', 'status': ['stableinterface'], # { Testing } 'supported_by': 'core'} # { Testing } """ METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} HASH_SYMBOL_METADATA = {'metadata_version': '1.1 # 4', 'status': ['stableinterface'], 'supported_by': 'core'} METADATA_EXAMPLES = ( # Standard import (LICENSE + FUTURE_IMPORTS + STANDARD_METADATA + REGULAR_IMPORTS, (METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])), # Metadata at end of file (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + STANDARD_METADATA.rstrip(), (METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])), # Metadata at beginning of file (STANDARD_METADATA + LICENSE + REGULAR_IMPORTS, (METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])), # Standard import with a trailing comment (LICENSE + FUTURE_IMPORTS + TRAILING_COMMENT_METADATA + REGULAR_IMPORTS, (METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])), # Metadata at end of file with a trailing comment (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + TRAILING_COMMENT_METADATA.rstrip(), (METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])), # Metadata at beginning of file with a trailing comment (TRAILING_COMMENT_METADATA + LICENSE + 
REGULAR_IMPORTS, (METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])), # FIXME: Current code cannot handle multiple statements on the same line. # This is bad style so we're just going to ignore it for now # Standard import with other statements on the same line # (LICENSE + FUTURE_IMPORTS + MULTIPLE_STATEMENTS_METADATA + REGULAR_IMPORTS, # (METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])), # Metadata at end of file with other statements on the same line # (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + MULTIPLE_STATEMENTS_METADATA.rstrip(), # (METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])), # Metadata at beginning of file with other statements on the same line # (MULTIPLE_STATEMENTS_METADATA + LICENSE + REGULAR_IMPORTS, # (METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])), # Standard import with comment inside the metadata (LICENSE + FUTURE_IMPORTS + EMBEDDED_COMMENT_METADATA + REGULAR_IMPORTS, (METADATA, 5, 0, 8, 42, ['ANSIBLE_METADATA'])), # Metadata at end of file with comment inside the metadata (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + EMBEDDED_COMMENT_METADATA.rstrip(), (METADATA, 8, 0, 11, 42, ['ANSIBLE_METADATA'])), # Metadata at beginning of file with comment inside the metadata (EMBEDDED_COMMENT_METADATA + LICENSE + REGULAR_IMPORTS, (METADATA, 1, 0, 4, 42, ['ANSIBLE_METADATA'])), # FIXME: Current code cannot handle hash symbols in the last element of # the metadata. Fortunately, the metadata currently fully specifies all # the strings inside of metadata and none of them can contain a hash. # Need to fix this to future-proof it against strings containing hashes # Standard import with hash symbol in metadata # (LICENSE + FUTURE_IMPORTS + HASH_SYMBOL_METADATA + REGULAR_IMPORTS, # (HASH_SYMBOL_METADATA, 5, 0, 7, 53, ['ANSIBLE_METADATA'])), # Metadata at end of file with hash symbol in metadata # (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + HASH_SYMBOL_HASH_SYMBOL_METADATA.rstrip(), # (HASH_SYMBOL_METADATA, 8, 0, 10, 53, ['ANSIBLE_METADATA'])), # Metadata at beginning of file with hash symbol in metadata # (HASH_SYMBOL_HASH_SYMBOL_METADATA + LICENSE + REGULAR_IMPORTS, # (HASH_SYMBOL_METADATA, 1, 0, 3, 53, ['ANSIBLE_METADATA'])), # Standard import with a bunch of hashes everywhere (LICENSE + FUTURE_IMPORTS + HASH_COMBO_METADATA + REGULAR_IMPORTS, (HASH_SYMBOL_METADATA, 5, 0, 8, 42, ['ANSIBLE_METADATA'])), # Metadata at end of file with a bunch of hashes everywhere (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + HASH_COMBO_METADATA.rstrip(), (HASH_SYMBOL_METADATA, 8, 0, 11, 42, ['ANSIBLE_METADATA'])), # Metadata at beginning of file with a bunch of hashes everywhere (HASH_COMBO_METADATA + LICENSE + REGULAR_IMPORTS, (HASH_SYMBOL_METADATA, 1, 0, 4, 42, ['ANSIBLE_METADATA'])), # Standard import with a junk ANSIBLE_METADATA as well (LICENSE + FUTURE_IMPORTS + b"\nANSIBLE_METADATA = 10\n" + HASH_COMBO_METADATA + REGULAR_IMPORTS, (HASH_SYMBOL_METADATA, 7, 0, 10, 42, ['ANSIBLE_METADATA'])), ) # FIXME: String/yaml metadata is not implemented yet. 
Need more test cases once it is implemented STRING_METADATA_EXAMPLES = ( # Standard import (LICENSE + FUTURE_IMPORTS + TEXT_STD_METADATA + REGULAR_IMPORTS, (METADATA, 5, 0, 10, 3, ['ANSIBLE_METADATA'])), # Metadata at end of file (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + TEXT_STD_METADATA.rstrip(), (METADATA, 8, 0, 13, 3, ['ANSIBLE_METADATA'])), # Metadata at beginning of file (TEXT_STD_METADATA + LICENSE + REGULAR_IMPORTS, (METADATA, 1, 0, 6, 3, ['ANSIBLE_METADATA'])), # Standard import (LICENSE + FUTURE_IMPORTS + BYTES_STD_METADATA + REGULAR_IMPORTS, (METADATA, 5, 0, 10, 3, ['ANSIBLE_METADATA'])), # Metadata at end of file (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + BYTES_STD_METADATA.rstrip(), (METADATA, 8, 0, 13, 3, ['ANSIBLE_METADATA'])), # Metadata at beginning of file (BYTES_STD_METADATA + LICENSE + REGULAR_IMPORTS, (METADATA, 1, 0, 6, 3, ['ANSIBLE_METADATA'])), ) @pytest.mark.parametrize("code, expected", METADATA_EXAMPLES) def test_dict_metadata(code, expected): assert md.extract_metadata(module_data=code, offsets=True) == expected @pytest.mark.parametrize("code, expected", STRING_METADATA_EXAMPLES) def test_string_metadata(code, expected): # FIXME: String/yaml metadata is not implemented yet. with pytest.raises(NotImplementedError): assert md.extract_metadata(module_data=code, offsets=True) == expected def test_required_params(): with pytest.raises(TypeError, message='One of module_ast or module_data must be given'): assert md.extract_metadata() def test_module_data_param_given_with_offset(): with pytest.raises(TypeError, message='If offsets is True then module_data must also be given'): assert md.extract_metadata(module_ast='something', offsets=True) def test_invalid_dict_metadata(): with pytest.raises(SyntaxError): assert md.extract_metadata(module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1",\n' + REGULAR_IMPORTS) with pytest.raises(md.ParseError, message='Unable to find the end of dictionary'): assert md.extract_metadata(module_ast=ast.parse(LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1"}\n' + REGULAR_IMPORTS), module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1",\n' + REGULAR_IMPORTS, offsets=True) def test_multiple_statements_limitation(): with pytest.raises(md.ParseError, message='Multiple statements per line confuses the module metadata parser.'): assert md.extract_metadata(module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1"}; a=b\n' + REGULAR_IMPORTS, offsets=True)
gpl-3.0
odrodrig/Devoxx4Kids
node_modules/utf8/tests/generate-test-data.py
1788
1435
#!/usr/bin/env python

import re
import json

# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    if codePoint >= 0x0000 and codePoint <= 0xFFFF:
        return unichr(codePoint)
    elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
        highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
        lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
        return unichr(highSurrogate) + unichr(lowSurrogate)
    else:
        return 'Error'

def hexify(codePoint):
    return 'U+' + hex(codePoint)[2:].upper().zfill(6)

def writeFile(filename, contents):
    print filename
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')

data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    # Skip non-scalar values.
    if codePoint >= 0xD800 and codePoint <= 0xDFFF:
        continue
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });

jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)

writeFile('data.json', jsonData)
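# Illustrative aside, not from the original script: the surrogate formula used
# in unisymbol() traced for one supplementary-plane code point (U+1D306 is
# chosen purely as an example).
example = 0x1D306
highSurrogate = int((example - 0x10000) / 0x400) + 0xD800  # -> 0xD834
lowSurrogate = int((example - 0x10000) % 0x400) + 0xDC00   # -> 0xDF06
assert unisymbol(example) == unichr(highSurrogate) + unichr(lowSurrogate)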
mit
bitkeeper/python-opcua
opcua/server/binary_server_asyncio.py
1
4546
""" Socket server forwarding request to internal server """ import logging try: # we prefer to use bundles asyncio version, otherwise fallback to trollius import asyncio except ImportError: import trollius as asyncio from opcua import ua from opcua.server.uaprocessor import UaProcessor logger = logging.getLogger(__name__) class BinaryServer(object): def __init__(self, internal_server, hostname, port): self.logger = logging.getLogger(__name__) self.hostname = hostname self.port = port self.iserver = internal_server self.loop = internal_server.loop self._server = None self._policies = [] def set_policies(self, policies): self._policies = policies def start(self): class OPCUAProtocol(asyncio.Protocol): """ instanciated for every connection defined as internal class since it needs access to the internal server object FIXME: find another solution """ iserver = self.iserver loop = self.loop logger = self.logger policies = self._policies def connection_made(self, transport): self.peername = transport.get_extra_info('peername') self.logger.info('New connection from %s', self.peername) self.transport = transport self.processor = UaProcessor(self.iserver, self.transport) self.processor.set_policies(self.policies) self.data = b"" self.iserver.asyncio_transports.append(transport) def connection_lost(self, ex): self.logger.info('Lost connection from %s, %s', self.peername, ex) self.transport.close() self.iserver.asyncio_transports.remove(self.transport) self.processor.close() def data_received(self, data): logger.debug("received %s bytes from socket", len(data)) if self.data: data = self.data + data self.data = b"" self._process_data(data) def _process_data(self, data): buf = ua.utils.Buffer(data) while True: try: backup_buf = buf.copy() try: hdr = ua.Header.from_string(buf) except ua.utils.NotEnoughData: logger.info("We did not receive enough data from client, waiting for more") self.data = backup_buf.read(len(backup_buf)) return if len(buf) < hdr.body_size: logger.info("We did not receive enough data from client, waiting for more") self.data = backup_buf.read(len(backup_buf)) return ret = self.processor.process(hdr, buf) if not ret: logger.info("processor returned False, we close connection from %s", self.peername) self.transport.close() return if len(buf) == 0: return except Exception: logger.exception("Exception raised while parsing message from client, closing") return coro = self.loop.create_server(OPCUAProtocol, self.hostname, self.port) self._server = self.loop.run_coro_and_wait(coro) # get the port and the hostname from the created server socket # only relevant for dynamic port asignment (when self.port == 0) if self.port == 0 and len(self._server.sockets) == 1: # will work for AF_INET and AF_INET6 socket names # these are to only families supported by the create_server call sockname = self._server.sockets[0].getsockname() self.hostname = sockname[0] self.port = sockname[1] print('Listening on {0}:{1}'.format(self.hostname, self.port)) def stop(self): self.logger.info("Closing asyncio socket server") for transport in self.iserver.asyncio_transports: transport.close() if self._server: self.loop.call_soon(self._server.close) self.loop.run_coro_and_wait(self._server.wait_closed())
lgpl-3.0
Duoxilian/home-assistant
homeassistant/const.py
3
12028
# coding: utf-8 """Constants used by Home Assistant components.""" MAJOR_VERSION = 0 MINOR_VERSION = 39 PATCH_VERSION = '0.dev0' __short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION) __version__ = '{}.{}'.format(__short_version__, PATCH_VERSION) REQUIRED_PYTHON_VER = (3, 4, 2) REQUIRED_PYTHON_VER_WIN = (3, 5, 2) PROJECT_NAME = 'Home Assistant' PROJECT_PACKAGE_NAME = 'homeassistant' PROJECT_LICENSE = 'Apache License 2.0' PROJECT_AUTHOR = 'The Home Assistant Authors' PROJECT_COPYRIGHT = ' 2013, {}'.format(PROJECT_AUTHOR) PROJECT_URL = 'https://home-assistant.io/' PROJECT_EMAIL = '[email protected]' PROJECT_DESCRIPTION = ('Open-source home automation platform ' 'running on Python 3.') PROJECT_LONG_DESCRIPTION = ('Home Assistant is an open-source ' 'home automation platform running on Python 3. ' 'Track and control all devices at home and ' 'automate control. ' 'Installation in less than a minute.') PROJECT_CLASSIFIERS = [ 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.4', 'Topic :: Home Automation' ] PROJECT_GITHUB_USERNAME = 'home-assistant' PROJECT_GITHUB_REPOSITORY = 'home-assistant' PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME) GITHUB_PATH = '{}/{}'.format(PROJECT_GITHUB_USERNAME, PROJECT_GITHUB_REPOSITORY) GITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH) PLATFORM_FORMAT = '{}.{}' # Can be used to specify a catch all when registering state or event listeners. MATCH_ALL = '*' # If no name is specified DEVICE_DEFAULT_NAME = 'Unnamed Device' WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] SUN_EVENT_SUNSET = 'sunset' SUN_EVENT_SUNRISE = 'sunrise' # #### CONFIG #### CONF_ABOVE = 'above' CONF_ACCESS_TOKEN = 'access_token' CONF_AFTER = 'after' CONF_ALIAS = 'alias' CONF_API_KEY = 'api_key' CONF_AUTHENTICATION = 'authentication' CONF_BASE = 'base' CONF_BEFORE = 'before' CONF_BELOW = 'below' CONF_BINARY_SENSORS = 'binary_sensors' CONF_BLACKLIST = 'blacklist' CONF_BRIGHTNESS = 'brightness' CONF_CODE = 'code' CONF_COLOR_TEMP = 'color_temp' CONF_COMMAND = 'command' CONF_COMMAND_CLOSE = 'command_close' CONF_COMMAND_OFF = 'command_off' CONF_COMMAND_ON = 'command_on' CONF_COMMAND_OPEN = 'command_open' CONF_COMMAND_STATE = 'command_state' CONF_COMMAND_STOP = 'command_stop' CONF_CONDITION = 'condition' CONF_COVERS = 'covers' CONF_CUSTOMIZE = 'customize' CONF_CUSTOMIZE_DOMAIN = 'customize_domain' CONF_CUSTOMIZE_GLOB = 'customize_glob' CONF_DEVICE = 'device' CONF_DEVICE_CLASS = 'device_class' CONF_DEVICES = 'devices' CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger' CONF_DISCOVERY = 'discovery' CONF_DISPLAY_OPTIONS = 'display_options' CONF_DOMAIN = 'domain' CONF_DOMAINS = 'domains' CONF_ELEVATION = 'elevation' CONF_EMAIL = 'email' CONF_ENTITIES = 'entities' CONF_ENTITY_ID = 'entity_id' CONF_ENTITY_NAMESPACE = 'entity_namespace' CONF_EVENT = 'event' CONF_EXCLUDE = 'exclude' CONF_FILE_PATH = 'file_path' CONF_FILENAME = 'filename' CONF_FRIENDLY_NAME = 'friendly_name' CONF_HEADERS = 'headers' CONF_HOST = 'host' CONF_HOSTS = 'hosts' CONF_ICON = 'icon' CONF_INCLUDE = 'include' CONF_ID = 'id' CONF_LATITUDE = 'latitude' CONF_LONGITUDE = 'longitude' CONF_MAC = 'mac' CONF_METHOD = 'method' CONF_MINIMUM = 'minimum' CONF_MAXIMUM = 'maximum' CONF_MONITORED_CONDITIONS = 'monitored_conditions' CONF_MONITORED_VARIABLES = 'monitored_variables' CONF_NAME = 'name' CONF_OFFSET = 'offset' CONF_OPTIMISTIC = 
'optimistic' CONF_PACKAGES = 'packages' CONF_PASSWORD = 'password' CONF_PATH = 'path' CONF_PAYLOAD = 'payload' CONF_PAYLOAD_OFF = 'payload_off' CONF_PAYLOAD_ON = 'payload_on' CONF_PENDING_TIME = 'pending_time' CONF_PIN = 'pin' CONF_PLATFORM = 'platform' CONF_PORT = 'port' CONF_PREFIX = 'prefix' CONF_PROTOCOL = 'protocol' CONF_QUOTE = 'quote' CONF_RECIPIENT = 'recipient' CONF_RESOURCE = 'resource' CONF_RESOURCES = 'resources' CONF_RGB = 'rgb' CONF_SCAN_INTERVAL = 'scan_interval' CONF_SENDER = 'sender' CONF_SENSOR_CLASS = 'sensor_class' CONF_SENSORS = 'sensors' CONF_SSL = 'ssl' CONF_STATE = 'state' CONF_STRUCTURE = 'structure' CONF_SWITCHES = 'switches' CONF_TEMPERATURE_UNIT = 'temperature_unit' CONF_TIME_ZONE = 'time_zone' CONF_TIMEOUT = 'timeout' CONF_TOKEN = 'token' CONF_TRIGGER_TIME = 'trigger_time' CONF_TYPE = 'type' CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement' CONF_UNIT_SYSTEM = 'unit_system' CONF_URL = 'url' CONF_USERNAME = 'username' CONF_VALUE_TEMPLATE = 'value_template' CONF_VERIFY_SSL = 'verify_ssl' CONF_WEEKDAY = 'weekday' CONF_WHITELIST = 'whitelist' CONF_ZONE = 'zone' # #### EVENTS #### EVENT_HOMEASSISTANT_START = 'homeassistant_start' EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop' EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close' EVENT_STATE_CHANGED = 'state_changed' EVENT_TIME_CHANGED = 'time_changed' EVENT_CALL_SERVICE = 'call_service' EVENT_SERVICE_EXECUTED = 'service_executed' EVENT_PLATFORM_DISCOVERED = 'platform_discovered' EVENT_COMPONENT_LOADED = 'component_loaded' EVENT_SERVICE_REGISTERED = 'service_registered' # #### STATES #### STATE_ON = 'on' STATE_OFF = 'off' STATE_HOME = 'home' STATE_NOT_HOME = 'not_home' STATE_UNKNOWN = 'unknown' STATE_OPEN = 'open' STATE_CLOSED = 'closed' STATE_PLAYING = 'playing' STATE_PAUSED = 'paused' STATE_IDLE = 'idle' STATE_STANDBY = 'standby' STATE_ALARM_DISARMED = 'disarmed' STATE_ALARM_ARMED_HOME = 'armed_home' STATE_ALARM_ARMED_AWAY = 'armed_away' STATE_ALARM_PENDING = 'pending' STATE_ALARM_TRIGGERED = 'triggered' STATE_LOCKED = 'locked' STATE_UNLOCKED = 'unlocked' STATE_UNAVAILABLE = 'unavailable' # #### STATE AND EVENT ATTRIBUTES #### # Attribution ATTR_ATTRIBUTION = 'attribution' # Contains current time for a TIME_CHANGED event ATTR_NOW = 'now' # Contains domain, service for a SERVICE_CALL event ATTR_DOMAIN = 'domain' ATTR_SERVICE = 'service' ATTR_SERVICE_DATA = 'service_data' # Data for a SERVICE_EXECUTED event ATTR_SERVICE_CALL_ID = 'service_call_id' # Contains one string or a list of strings, each being an entity id ATTR_ENTITY_ID = 'entity_id' # String with a friendly name for the entity ATTR_FRIENDLY_NAME = 'friendly_name' # A picture to represent entity ATTR_ENTITY_PICTURE = 'entity_picture' # Icon to use in the frontend ATTR_ICON = 'icon' # The unit of measurement if applicable ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement' CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str # Temperature attribute ATTR_TEMPERATURE = 'temperature' TEMP_CELSIUS = '°C' TEMP_FAHRENHEIT = '°F' # Length units LENGTH_CENTIMETERS = 'cm' # type: str LENGTH_METERS = 'm' # type: str LENGTH_KILOMETERS = 'km' # type: str LENGTH_INCHES = 'in' # type: str LENGTH_FEET = 'ft' # type: str LENGTH_YARD = 'yd' # type: str LENGTH_MILES = 'mi' # type: str # Volume units VOLUME_LITERS = 'L' # type: str VOLUME_MILLILITERS = 'mL' # type: str VOLUME_GALLONS = 'gal' # type: str VOLUME_FLUID_OUNCE = 'fl. oz.' 
# type: str # Mass units MASS_GRAMS = 'g' # type: str MASS_KILOGRAMS = 'kg' # type: str MASS_OUNCES = 'oz' # type: str MASS_POUNDS = 'lb' # type: str # Contains the information that is discovered ATTR_DISCOVERED = 'discovered' # Location of the device/sensor ATTR_LOCATION = 'location' ATTR_BATTERY_LEVEL = 'battery_level' ATTR_WAKEUP = 'wake_up_interval' # For devices which support a code attribute ATTR_CODE = 'code' ATTR_CODE_FORMAT = 'code_format' # For devices which support an armed state ATTR_ARMED = 'device_armed' # For devices which support a locked state ATTR_LOCKED = 'locked' # For sensors that support 'tripping', eg. motion and door sensors ATTR_TRIPPED = 'device_tripped' # For sensors that support 'tripping' this holds the most recent # time the device was tripped ATTR_LAST_TRIP_TIME = 'last_tripped_time' # For all entity's, this hold whether or not it should be hidden ATTR_HIDDEN = 'hidden' # Location of the entity ATTR_LATITUDE = 'latitude' ATTR_LONGITUDE = 'longitude' # Accuracy of location in meters ATTR_GPS_ACCURACY = 'gps_accuracy' # If state is assumed ATTR_ASSUMED_STATE = 'assumed_state' ATTR_STATE = 'state' ATTR_OPTION = 'option' # Bitfield of supported component features for the entity ATTR_SUPPORTED_FEATURES = 'supported_features' # Class of device within its domain ATTR_DEVICE_CLASS = 'device_class' # #### SERVICES #### SERVICE_HOMEASSISTANT_STOP = 'stop' SERVICE_HOMEASSISTANT_RESTART = 'restart' SERVICE_TURN_ON = 'turn_on' SERVICE_TURN_OFF = 'turn_off' SERVICE_TOGGLE = 'toggle' SERVICE_VOLUME_UP = 'volume_up' SERVICE_VOLUME_DOWN = 'volume_down' SERVICE_VOLUME_MUTE = 'volume_mute' SERVICE_VOLUME_SET = 'volume_set' SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause' SERVICE_MEDIA_PLAY = 'media_play' SERVICE_MEDIA_PAUSE = 'media_pause' SERVICE_MEDIA_STOP = 'media_stop' SERVICE_MEDIA_NEXT_TRACK = 'media_next_track' SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track' SERVICE_MEDIA_SEEK = 'media_seek' SERVICE_ALARM_DISARM = 'alarm_disarm' SERVICE_ALARM_ARM_HOME = 'alarm_arm_home' SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away' SERVICE_ALARM_TRIGGER = 'alarm_trigger' SERVICE_LOCK = 'lock' SERVICE_UNLOCK = 'unlock' SERVICE_OPEN = 'open' SERVICE_CLOSE = 'close' SERVICE_CLOSE_COVER = 'close_cover' SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt' SERVICE_OPEN_COVER = 'open_cover' SERVICE_OPEN_COVER_TILT = 'open_cover_tilt' SERVICE_SET_COVER_POSITION = 'set_cover_position' SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position' SERVICE_STOP_COVER = 'stop_cover' SERVICE_STOP_COVER_TILT = 'stop_cover_tilt' SERVICE_SELECT_OPTION = 'select_option' # #### API / REMOTE #### SERVER_PORT = 8123 URL_ROOT = '/' URL_API = '/api/' URL_API_STREAM = '/api/stream' URL_API_CONFIG = '/api/config' URL_API_DISCOVERY_INFO = '/api/discovery_info' URL_API_STATES = '/api/states' URL_API_STATES_ENTITY = '/api/states/{}' URL_API_EVENTS = '/api/events' URL_API_EVENTS_EVENT = '/api/events/{}' URL_API_SERVICES = '/api/services' URL_API_SERVICES_SERVICE = '/api/services/{}/{}' URL_API_EVENT_FORWARD = '/api/event_forwarding' URL_API_COMPONENTS = '/api/components' URL_API_ERROR_LOG = '/api/error_log' URL_API_LOG_OUT = '/api/log_out' URL_API_TEMPLATE = '/api/template' HTTP_OK = 200 HTTP_CREATED = 201 HTTP_MOVED_PERMANENTLY = 301 HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_UNPROCESSABLE_ENTITY = 422 HTTP_INTERNAL_SERVER_ERROR = 500 HTTP_BASIC_AUTHENTICATION = 'basic' HTTP_DIGEST_AUTHENTICATION = 'digest' HTTP_HEADER_HA_AUTH = 'X-HA-access' 
HTTP_HEADER_ACCEPT_ENCODING = 'Accept-Encoding' HTTP_HEADER_CONTENT_TYPE = 'Content-type' HTTP_HEADER_CONTENT_ENCODING = 'Content-Encoding' HTTP_HEADER_VARY = 'Vary' HTTP_HEADER_CONTENT_LENGTH = 'Content-Length' HTTP_HEADER_CACHE_CONTROL = 'Cache-Control' HTTP_HEADER_EXPIRES = 'Expires' HTTP_HEADER_ORIGIN = 'Origin' HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With' HTTP_HEADER_ACCEPT = 'Accept' HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin' HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers' ALLOWED_CORS_HEADERS = [HTTP_HEADER_ORIGIN, HTTP_HEADER_ACCEPT, HTTP_HEADER_X_REQUESTED_WITH, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_HA_AUTH] CONTENT_TYPE_JSON = 'application/json' CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}' CONTENT_TYPE_TEXT_PLAIN = 'text/plain' # The exit code to send to request a restart RESTART_EXIT_CODE = 100 UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str LENGTH = 'length' # type: str MASS = 'mass' # type: str VOLUME = 'volume' # type: str TEMPERATURE = 'temperature' # type: str SPEED_MS = 'speed_ms' # type: str ILLUMINANCE = 'illuminance' # type: str
mit
lgscofield/odoo
addons/survey/wizard/__init__.py
385
1026
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import survey_email_compose_message
agpl-3.0
marcel-dancak/QGIS
tests/src/python/test_qgsrasterbandcombobox.py
23
3809
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsRasterBandComboBox. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Nyall Dawson' __date__ = '09/05/2017' __copyright__ = 'Copyright 2017, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA import os from qgis.core import QgsRasterLayer from qgis.gui import QgsRasterBandComboBox from qgis.testing import start_app, unittest from qgis.PyQt.QtCore import QFileInfo from qgis.PyQt.QtTest import QSignalSpy from utilities import unitTestDataPath start_app() class TestQgsRasterBandComboBox(unittest.TestCase): def testNoLayer(self): """ Test widget with no layer """ combo = QgsRasterBandComboBox() self.assertFalse(combo.layer()) self.assertEqual(combo.currentBand(), -1) combo.setShowNotSetOption(True) self.assertEqual(combo.currentBand(), -1) combo.setBand(11111) self.assertEqual(combo.currentBand(), -1) combo.setBand(-11111) self.assertEqual(combo.currentBand(), -1) def testOneBandRaster(self): path = os.path.join(unitTestDataPath('raster'), 'band1_float32_noct_epsg4326.tif') info = QFileInfo(path) base_name = info.baseName() layer = QgsRasterLayer(path, base_name) self.assertTrue(layer) combo = QgsRasterBandComboBox() combo.setLayer(layer) self.assertEqual(combo.layer(), layer) self.assertEqual(combo.currentBand(), 1) self.assertEqual(combo.count(), 1) combo.setShowNotSetOption(True) self.assertEqual(combo.currentBand(), 1) self.assertEqual(combo.count(), 2) combo.setBand(-1) self.assertEqual(combo.currentBand(), -1) combo.setBand(1) self.assertEqual(combo.currentBand(), 1) combo.setShowNotSetOption(False) self.assertEqual(combo.currentBand(), 1) self.assertEqual(combo.count(), 1) def testMultiBandRaster(self): path = os.path.join(unitTestDataPath('raster'), 'band3_float32_noct_epsg4326.tif') info = QFileInfo(path) base_name = info.baseName() layer = QgsRasterLayer(path, base_name) self.assertTrue(layer) combo = QgsRasterBandComboBox() combo.setLayer(layer) self.assertEqual(combo.layer(), layer) self.assertEqual(combo.currentBand(), 1) self.assertEqual(combo.count(), 3) combo.setBand(2) self.assertEqual(combo.currentBand(), 2) combo.setShowNotSetOption(True) self.assertEqual(combo.currentBand(), 2) self.assertEqual(combo.count(), 4) combo.setShowNotSetOption(False) self.assertEqual(combo.currentBand(), 2) self.assertEqual(combo.count(), 3) def testSignals(self): path = os.path.join(unitTestDataPath('raster'), 'band3_float32_noct_epsg4326.tif') info = QFileInfo(path) base_name = info.baseName() layer = QgsRasterLayer(path, base_name) self.assertTrue(layer) combo = QgsRasterBandComboBox() combo.setLayer(layer) signal_spy = QSignalSpy(combo.bandChanged) combo.setBand(2) self.assertEqual(len(signal_spy), 1) self.assertEqual(signal_spy[0][0], 2) combo.setBand(3) self.assertEqual(len(signal_spy), 2) self.assertEqual(signal_spy[1][0], 3) if __name__ == '__main__': unittest.main()
gpl-2.0
MartyParty21/AwakenDreamsClient
mcp/runtime/startserver.py
3
1524
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 8 16:54:36 2011

@author: ProfMobius
@version: v1.0
"""

import sys
import logging
import json
from optparse import OptionParser

from commands import Commands, SERVER


def main():
    parser = OptionParser(version='MCP %s' % Commands.fullversion())
    parser.add_option('-c', '--config', dest='config', help='additional configuration file')
    parser.add_option('-m', '--main', dest='mainclass', help='Main class to start',
                      default='net.minecraft.server.MinecraftServer')
    parser.add_option('-j', '--json', dest='json', action='store_true',
                      help='Use the json file to setup parameters', default=False)
    options, _ = parser.parse_args()
    startserver(options.config, options.mainclass, options.json)


def startserver(conffile, mainclass, jsonoverride):
    try:
        commands = Commands(conffile)

        #if not mainclass:
        #    mainclass = "net.minecraft.server.MinecraftServer"
        extraargs = ""
        if jsonoverride:
            jsonData = json.load(open(commands.jsonFile))
            mainclass = jsonData['mainClass']
            extraargs = jsonData['minecraftArguments']

        if not commands.checkbins(SERVER):
            commands.logger.warning('!! Can not find server bins !!')
            sys.exit(1)

        commands.startserver(mainclass, extraargs)
    except Exception:  # pylint: disable-msg=W0703
        logging.exception('FATAL ERROR')
        sys.exit(1)


if __name__ == '__main__':
    main()
gpl-3.0
jinnykoo/wuyisj
tests/unit/core/customisation_tests.py
5
3068
import os import tempfile from django.test import TestCase from django.conf import settings from oscar.core import customisation VALID_FOLDER_PATH = 'tests/_site/apps' class TestUtilities(TestCase): def test_subfolder_extraction(self): folders = list(customisation.subfolders('/var/www/eggs')) self.assertEqual(folders, ['/var', '/var/www', '/var/www/eggs']) class TestForkAppFunction(TestCase): def setUp(self): self.tmp_folder = tempfile.mkdtemp() def test_raises_exception_for_nonexistant_app_label(self): with self.assertRaises(ValueError): customisation.fork_app('sillytown', 'somefolder') def test_raises_exception_if_app_has_already_been_forked(self): # We piggyback on another test which means a custom app is already in # the settings we use for the test suite. We just check that's still # the case here. assert 'tests._site.apps.partner' in settings.INSTALLED_APPS with self.assertRaises(ValueError): customisation.fork_app('partner', VALID_FOLDER_PATH) def test_creates_new_folder(self): customisation.fork_app('order', self.tmp_folder) new_folder_path = os.path.join(self.tmp_folder, 'order') self.assertTrue(os.path.exists(new_folder_path)) def test_creates_init_file(self): customisation.fork_app('order', self.tmp_folder) filepath = os.path.join(self.tmp_folder, 'order', '__init__.py') self.assertTrue(os.path.exists(filepath)) def test_handles_dashboard_app(self): # Dashboard apps are fiddly as they aren't identified by a single app # label. customisation.fork_app('dashboard.catalogue', self.tmp_folder) # Check __init__.py created (and supporting folders) init_path = os.path.join(self.tmp_folder, 'dashboard/catalogue/__init__.py') self.assertTrue(os.path.exists(init_path)) def test_creates_models_and_admin_file(self): customisation.fork_app('order', self.tmp_folder) for module, expected_string in [ ('models', 'from oscar.apps.order.models import *'), ('admin', 'from oscar.apps.order.admin import *'), ('config', 'OrderConfig')]: filepath = os.path.join(self.tmp_folder, 'order', '%s.py' % module) self.assertTrue(os.path.exists(filepath)) contents = open(filepath).read() self.assertTrue(expected_string in contents) def test_copies_in_migrations_when_needed(self): for app, has_models in [('order', True), ('search', False)]: customisation.fork_app(app, self.tmp_folder) native_migration_path = os.path.join( self.tmp_folder, app, 'migrations') self.assertEqual(has_models, os.path.exists(native_migration_path)) south_migration_path = os.path.join( self.tmp_folder, app, 'south_migrations') self.assertEqual(has_models, os.path.exists(south_migration_path))
bsd-3-clause
nex3/pygments
pygments/styles/vim.py
75
1976
# -*- coding: utf-8 -*-
"""
    pygments.styles.vim
    ~~~~~~~~~~~~~~~~~~~

    A highlighting style for Pygments, inspired by vim.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
     Number, Operator, Generic, Whitespace, Token


class VimStyle(Style):
    """
    Styles somewhat like vim 7.0
    """

    background_color = "#000000"
    highlight_color = "#222222"
    default_style = "#cccccc"

    styles = {
        Token:                "#cccccc",
        Whitespace:           "",
        Comment:              "#000080",
        Comment.Preproc:      "",
        Comment.Special:      "bold #cd0000",

        Keyword:              "#cdcd00",
        Keyword.Declaration:  "#00cd00",
        Keyword.Namespace:    "#cd00cd",
        Keyword.Pseudo:       "",
        Keyword.Type:         "#00cd00",

        Operator:             "#3399cc",
        Operator.Word:        "#cdcd00",

        Name:                 "",
        Name.Class:           "#00cdcd",
        Name.Builtin:         "#cd00cd",
        Name.Exception:       "bold #666699",
        Name.Variable:        "#00cdcd",

        String:               "#cd0000",
        Number:               "#cd00cd",

        Generic.Heading:      "bold #000080",
        Generic.Subheading:   "bold #800080",
        Generic.Deleted:      "#cd0000",
        Generic.Inserted:     "#00cd00",
        Generic.Error:        "#FF0000",
        Generic.Emph:         "italic",
        Generic.Strong:       "bold",
        Generic.Prompt:       "bold #000080",
        Generic.Output:       "#888",
        Generic.Traceback:    "#04D",
        Error:                "border:#FF0000"
    }
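# Illustrative usage sketch, not part of the original style module. Styles are
# looked up by name through the normal Pygments entry points; this one is
# registered upstream as 'vim'.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    # Render a tiny snippet as HTML using the VimStyle defined above.
    print highlight('print "hello"', PythonLexer(), HtmlFormatter(style='vim'))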
bsd-2-clause
babelphish/fridge-cop
fridge_language.py
1
1219
import random


def get_fridge_language(language):
    if (language == "EN"):
        return EnglishFridgeLanguage()


class FridgeLanguage:
    def __init__(self):  # was misspelled "_init_", which Python never calls; fixed so the RNG is actually seeded
        random.seed()

    def get_random_letter(self):
        value = random.random() * 100
        total = 0
        for letter_frequency in self.distribution:
            letter = letter_frequency[1]
            total += letter_frequency[0]
            if (value < total):
                break
        return letter


class EnglishFridgeLanguage(FridgeLanguage):
    distribution = [
        [13.0001, 'E'],
        [9.056, 'T'],
        [8.167, 'A'],
        [7.507, 'O'],
        [6.966, 'I'],
        [6.749, 'N'],
        [6.327, 'S'],
        [6.094, 'H'],
        [5.987, 'R'],
        [4.253, 'D'],
        [4.025, 'L'],
        [2.782, 'C'],
        [2.758, 'U'],
        [2.406, 'M'],
        [2.360, 'W'],
        [2.228, 'F'],
        [2.015, 'G'],
        [1.974, 'Y'],
        [1.929, 'P'],
        [1.492, 'B'],
        [0.978, 'V'],
        [0.772, 'K'],
        [0.153, 'J'],
        [0.150, 'X'],
        [0.095, 'Q'],
        [0.074, 'Z']
    ]
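# Illustrative usage sketch, not from the original module: draw letters from the
# frequency table above; common letters such as E and T should show up far more
# often than Q or Z.
if __name__ == '__main__':
    language = get_fridge_language("EN")
    print "".join(language.get_random_letter() for _ in range(20))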
agpl-3.0
yaojingwu1992/XlsxWriter
xlsxwriter/test/comparison/test_cond_format07.py
8
2470
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, [email protected] # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'cond_format07.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {} def test_create_file(self): """Test the creation of a simple XlsxWriter file with conditional formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format1 = workbook.add_format({'bg_color': '#FF0000', 'dxf_index': 1}) format2 = workbook.add_format({'bg_color': '#92D050', 'dxf_index': 0}) data = [ [90, 80, 50, 10, 20, 90, 40, 90, 30, 40], [20, 10, 90, 100, 30, 60, 70, 60, 50, 90], [10, 50, 60, 50, 20, 50, 80, 30, 40, 60], [10, 90, 20, 40, 10, 40, 50, 70, 90, 50], [70, 100, 10, 90, 10, 10, 20, 100, 100, 40], [20, 60, 10, 100, 30, 10, 20, 60, 100, 10], [10, 60, 10, 80, 100, 80, 30, 30, 70, 40], [30, 90, 60, 10, 10, 100, 40, 40, 30, 40], [80, 90, 10, 20, 20, 50, 80, 20, 60, 90], [60, 80, 30, 30, 10, 50, 80, 60, 50, 30], ] for row, row_data in enumerate(data): worksheet.write_row(row, 0, row_data) row += 1 worksheet.conditional_format('A1:J10', {'type': 'cell', 'format': format1, 'criteria': '>=', 'value': 50, }) worksheet.conditional_format('A1:J10', {'type': 'cell', 'format': format2, 'criteria': '<', 'value': 50, }) workbook.close() self.assertExcelEqual()
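# Illustrative stand-alone sketch, not part of the original test: the same pair
# of conditional_format() calls outside the comparison-test harness. The
# filename and the single data row are arbitrary.
def _conditional_format_sketch():
    import xlsxwriter

    workbook = xlsxwriter.Workbook('cond_format_demo.xlsx')
    worksheet = workbook.add_worksheet()
    red = workbook.add_format({'bg_color': '#FF0000'})
    green = workbook.add_format({'bg_color': '#92D050'})
    worksheet.write_row(0, 0, [90, 80, 50, 10, 20])
    # Cells >= 50 get the red format, everything below 50 gets the green one.
    worksheet.conditional_format('A1:E1', {'type': 'cell', 'criteria': '>=', 'value': 50, 'format': red})
    worksheet.conditional_format('A1:E1', {'type': 'cell', 'criteria': '<', 'value': 50, 'format': green})
    workbook.close()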
bsd-2-clause
duointeractive/media-nommer
media_nommer/ec2nommerd/node_state.py
1
7367
""" Contains the :py:class:`NodeStateManager` class, which is an abstraction layer for storing and communicating the status of EC2_ nodes. """ import urllib2 import datetime import boto from twisted.internet import reactor from media_nommer.conf import settings from media_nommer.utils import logger from media_nommer.utils.compat import total_seconds class NodeStateManager(object): """ Tracks this node's state, reports it to :doc:`../feederd`, and terminates itself if certain conditions of inactivity are met. """ last_dtime_i_did_something = datetime.datetime.now() # Used for lazy-loading the SDB connection. Do not refer to directly. __aws_sdb_connection = None # Used for lazy-loading the SDB domain. Do not refer to directly. __aws_sdb_nommer_state_domain = None # Used for lazy-loading the EC2 connection. Do not refer to directly. __aws_ec2_connection = None # Store the instance ID for this EC2 node (if not local). __instance_id = None @classmethod def _aws_ec2_connection(cls): """ Lazy-loading of the EC2 boto connection. Refer to this instead of referencing cls.__aws_ec2_connection directly. :returns: A boto connection to Amazon's EC2 interface. """ if not cls.__aws_ec2_connection: cls.__aws_ec2_connection = boto.connect_ec2( settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) return cls.__aws_ec2_connection @classmethod def _aws_sdb_connection(cls): """ Lazy-loading of the SimpleDB boto connection. Refer to this instead of referencing cls.__aws_sdb_connection directly. :returns: A boto connection to Amazon's SimpleDB interface. """ if not cls.__aws_sdb_connection: cls.__aws_sdb_connection = boto.connect_sdb( settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) return cls.__aws_sdb_connection @classmethod def _aws_sdb_nommer_state_domain(cls): """ Lazy-loading of the SimpleDB boto domain. Refer to this instead of referencing cls.__aws_sdb_nommer_state_domain directly. :returns: A boto SimpleDB domain for this workflow. """ if not cls.__aws_sdb_nommer_state_domain: cls.__aws_sdb_nommer_state_domain = cls._aws_sdb_connection().create_domain( settings.SIMPLEDB_EC2_NOMMER_STATE_DOMAIN) return cls.__aws_sdb_nommer_state_domain @classmethod def get_instance_id(cls, is_local=False): """ Determine this EC2 instance's unique instance ID. Lazy load this, and avoid further re-queries after the first one. :param bool is_local: When True, don't try to hit EC2's meta data server, When False, just make up a unique ID. :rtype: str :returns: The EC2 instance's ID. """ if not cls.__instance_id: if is_local: cls.__instance_id = 'local-dev' else: aws_meta_url = 'http://169.254.169.254/latest/meta-data/instance-id' response = urllib2.urlopen(aws_meta_url) cls.__instance_id = response.read() return cls.__instance_id @classmethod def is_ec2_instance(cls): """ Determine whether this is an EC2 instance or not. :rtype: bool :returns: ``True`` if this is an EC2 instance, ``False`` if otherwise. """ return cls.get_instance_id() != 'local-dev' @classmethod def send_instance_state_update(cls, state='ACTIVE'): """ Sends a status update to feederd through SimpleDB. Lets the daemon know how many jobs this instance is crunching right now. Also updates a timestamp field to let feederd know how long it has been since the instance's last check-in. :keyword str state: If this EC2_ instance is anything but ``ACTIVE``, pass the state here. This is useful during node termination. 
""" if cls.is_ec2_instance(): instance_id = cls.get_instance_id() item = cls._aws_sdb_nommer_state_domain().new_item(instance_id) item['id'] = instance_id item['active_jobs'] = cls.get_num_active_threads() - 1 item['last_report_dtime'] = datetime.datetime.now() item['state'] = state item.save() @classmethod def contemplate_termination(cls, thread_count_mod=0): """ Looks at how long it's been since this worker has done something, and decides whether to self-terminate. :param int thread_count_mod: Add this to the amount returned by the call to :py:meth:`get_num_active_threads`. This is useful when calling this method from a non-encoder thread. :rtype: bool :returns: ``True`` if this instance terminated itself, ``False`` if not. """ if not cls.is_ec2_instance(): # Developing locally, don't go here. return False # This is -1 since this is also a thread doing the contemplation. # This would always be 1, even if we had no jobs encoding, if we # didn't take into account this thread. num_active_threads = cls.get_num_active_threads() + thread_count_mod if num_active_threads > 0: # Encoding right now, don't terminate. return False tdelt = datetime.datetime.now() - cls.last_dtime_i_did_something # Total seconds of inactivity. inactive_secs = total_seconds(tdelt) # If we're over the inactivity threshold... if inactive_secs > settings.NOMMERD_MAX_INACTIVITY: instance_id = cls.get_instance_id() conn = cls._aws_ec2_connection() # Find this particular EC2 instance via boto. reservations = conn.get_all_instances(instance_ids=[instance_id]) # This should only be one match, but in the interest of # playing along... for reservation in reservations: for instance in reservation.instances: # Here's the instance, terminate it. logger.info("Goodbye, cruel world.") cls.send_instance_state_update(state='TERMINATED') instance.terminate() # Seeya later! return True # Continue existence, no termination. return False @classmethod def get_num_active_threads(cls): """ Checks the reactor's threadpool to see how many threads are currently working. This can be used to determine how busy this node is. :rtype: int :returns: The number of active threads. """ return len(reactor.getThreadPool().working) @classmethod def i_did_something(cls): """ Pat ourselves on the back each time we do something. Used for determining whether this node's continued existence is necessary anymore in :py:meth:`contemplate_termination`. """ cls.last_dtime_i_did_something = datetime.datetime.now()
bsd-3-clause
javierTerry/odoo
addons/point_of_sale/__openerp__.py
261
3612
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Point of Sale', 'version': '1.0.1', 'category': 'Point Of Sale', 'sequence': 6, 'summary': 'Touchscreen Interface for Shops', 'description': """ Quick and Easy sale process =========================== This module allows you to manage your shop sales very easily with a fully web based touchscreen interface. It is compatible with all PC tablets and the iPad, offering multiple payment methods. Product selection can be done in several ways: * Using a barcode reader * Browsing through categories of products or via a text search. Main Features ------------- * Fast encoding of the sale * Choose one payment method (the quick way) or split the payment between several payment methods * Computation of the amount of money to return * Create and confirm the picking list automatically * Allows the user to create an invoice automatically * Refund previous sales """, 'author': 'OpenERP SA', 'depends': ['sale_stock'], 'data': [ 'data/report_paperformat.xml', 'security/point_of_sale_security.xml', 'security/ir.model.access.csv', 'wizard/pos_box.xml', 'wizard/pos_confirm.xml', 'wizard/pos_details.xml', 'wizard/pos_discount.xml', 'wizard/pos_open_statement.xml', 'wizard/pos_payment.xml', 'wizard/pos_session_opening.xml', 'views/templates.xml', 'point_of_sale_report.xml', 'point_of_sale_view.xml', 'point_of_sale_sequence.xml', 'point_of_sale_data.xml', 'report/pos_order_report_view.xml', 'point_of_sale_workflow.xml', 'account_statement_view.xml', 'account_statement_report.xml', 'res_users_view.xml', 'res_partner_view.xml', 'views/report_statement.xml', 'views/report_usersproduct.xml', 'views/report_receipt.xml', 'views/report_saleslines.xml', 'views/report_detailsofsales.xml', 'views/report_payment.xml', 'views/report_sessionsummary.xml', 'views/point_of_sale.xml', ], 'demo': [ 'point_of_sale_demo.xml', 'account_statement_demo.xml', ], 'test': [ 'test/00_register_open.yml', 'test/01_order_to_payment.yml', 'test/02_order_to_invoice.yml', 'test/point_of_sale_report.yml', 'test/account_statement_reports.yml', ], 'installable': True, 'application': True, 'qweb': ['static/src/xml/pos.xml'], 'website': 'https://www.odoo.com/page/point-of-sale', 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ssh1/stbgui
lib/python/Components/Converter/PliExtraInfo.py
4
12709
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat) from enigma import iServiceInformation, iPlayableService from Components.Converter.Converter import Converter from Components.Element import cached from Components.config import config from Tools.Transponder import ConvertToHumanReadable, getChannelNumber from Tools.GetEcmInfo import GetEcmInfo from Poll import Poll def addspace(text): if text: text += " " return text class PliExtraInfo(Poll, Converter, object): def __init__(self, type): Converter.__init__(self, type) Poll.__init__(self) self.type = type self.poll_interval = 1000 self.poll_enabled = True self.caid_data = ( ( "0x100", "0x1ff", "Seca", "S", True ), ( "0x500", "0x5ff", "Via", "V", True ), ( "0x600", "0x6ff", "Irdeto", "I", True ), ( "0x900", "0x9ff", "NDS", "Nd", True ), ( "0xb00", "0xbff", "Conax", "Co", True ), ( "0xd00", "0xdff", "CryptoW", "Cw", True ), ( "0xe00", "0xeff", "PowerVU", "P", False ), ("0x1700", "0x17ff", "Beta", "B", True ), ("0x1800", "0x18ff", "Nagra", "N", True ), ("0x2600", "0x2600", "Biss", "Bi", False ), ("0x4ae0", "0x4ae1", "Dre", "D", False ), ("0x4aee", "0x4aee", "BulCrypt", "B1", False ), ("0x5581", "0x5581", "BulCrypt", "B2", False ) ) self.ca_table = ( ("CryptoCaidSecaAvailable", "S", False), ("CryptoCaidViaAvailable", "V", False), ("CryptoCaidIrdetoAvailable", "I", False), ("CryptoCaidNDSAvailable", "Nd", False), ("CryptoCaidConaxAvailable", "Co", False), ("CryptoCaidCryptoWAvailable", "Cw", False), ("CryptoCaidPowerVUAvailable", "P", False), ("CryptoCaidBetaAvailable", "B", False), ("CryptoCaidNagraAvailable", "N", False), ("CryptoCaidBissAvailable", "Bi", False), ("CryptoCaidDreAvailable", "D", False), ("CryptoCaidBulCrypt1Available","B1", False), ("CryptoCaidBulCrypt2Available","B2", False), ("CryptoCaidSecaSelected", "S", True), ("CryptoCaidViaSelected", "V", True), ("CryptoCaidIrdetoSelected", "I", True), ("CryptoCaidNDSSelected", "Nd", True), ("CryptoCaidConaxSelected", "Co", True), ("CryptoCaidCryptoWSelected", "Cw", True), ("CryptoCaidPowerVUSelected", "P", True), ("CryptoCaidBetaSelected", "B", True), ("CryptoCaidNagraSelected", "N", True), ("CryptoCaidBissSelected", "Bi", True), ("CryptoCaidDreSelected", "D", True), ("CryptoCaidBulCrypt1Selected", "B1", True), ("CryptoCaidBulCrypt2Selected", "B2", True), ) self.ecmdata = GetEcmInfo() self.feraw = self.fedata = self.updateFEdata = None def getCryptoInfo(self, info): if (info.getInfo(iServiceInformation.sIsCrypted) == 1): data = self.ecmdata.getEcmData() self.current_source = data[0] self.current_caid = data[1] self.current_provid = data[2] self.current_ecmpid = data[3] else: self.current_source = "" self.current_caid = "0" self.current_provid = "0" self.current_ecmpid = "0" def createCryptoBar(self, info): res = "" available_caids = info.getInfoObject(iServiceInformation.sCAIDs) for caid_entry in self.caid_data: if int(self.current_caid, 16) >= int(caid_entry[0], 16) and int(self.current_caid, 16) <= int(caid_entry[1], 16): color="\c0000??00" else: color = "\c007?7?7?" try: for caid in available_caids: if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16): color="\c00????00" except: pass if color != "\c007?7?7?" or caid_entry[4]: if res: res += " " res += color + caid_entry[3] res += "\c00??????" 
return res def createCryptoSpecial(self, info): caid_name = "FTA" try: for caid_entry in self.caid_data: if int(self.current_caid, 16) >= int(caid_entry[0], 16) and int(self.current_caid, 16) <= int(caid_entry[1], 16): caid_name = caid_entry[2] break return caid_name + ":%04x:%04x:%04x:%04x" % (int(self.current_caid,16), int(self.current_provid,16), info.getInfo(iServiceInformation.sSID), int(self.current_ecmpid,16)) except: pass return "" def createResolution(self, info): xres = info.getInfo(iServiceInformation.sVideoWidth) if xres == -1: return "" yres = info.getInfo(iServiceInformation.sVideoHeight) mode = ("i", "p", " ")[info.getInfo(iServiceInformation.sProgressive)] fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000) return str(xres) + "x" + str(yres) + mode + fps def createVideoCodec(self, info): return ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "")[info.getInfo(iServiceInformation.sVideoType)] def createPIDInfo(self, info): vpid = info.getInfo(iServiceInformation.sVideoPID) apid = info.getInfo(iServiceInformation.sAudioPID) pcrpid = info.getInfo(iServiceInformation.sPCRPID) sidpid = info.getInfo(iServiceInformation.sSID) tsid = info.getInfo(iServiceInformation.sTSID) onid = info.getInfo(iServiceInformation.sONID) if vpid < 0 : vpid = 0 if apid < 0 : apid = 0 if pcrpid < 0 : pcrpid = 0 if sidpid < 0 : sidpid = 0 if tsid < 0 : tsid = 0 if onid < 0 : onid = 0 return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid) def createTransponderInfo(self, fedata, feraw): if not feraw: return "" elif "DVB-T" in feraw.get("tuner_type"): tmp = addspace(self.createChannelNumber(fedata, feraw)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) else: tmp = addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \ + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) def createFrequency(self, feraw): frequency = feraw.get("frequency") if frequency: if "DVB-T" in feraw.get("tuner_type"): return str(int(frequency / 1000000. 
+ 0.5)) else: return str(int(frequency / 1000 + 0.5)) return "" def createChannelNumber(self, fedata, feraw): return "DVB-T" in feraw.get("tuner_type") and fedata.get("channel") or "" def createSymbolRate(self, fedata, feraw): if "DVB-T" in feraw.get("tuner_type"): bandwidth = fedata.get("bandwidth") if bandwidth: return bandwidth else: symbolrate = fedata.get("symbol_rate") if symbolrate: return str(symbolrate / 1000) return "" def createPolarization(self, fedata): return fedata.get("polarization_abbreviation") or "" def createFEC(self, fedata, feraw): if "DVB-T" in feraw.get("tuner_type"): code_rate_lp = fedata.get("code_rate_lp") code_rate_hp = fedata.get("code_rate_hp") if code_rate_lp and code_rate_hp: return code_rate_lp + "-" + code_rate_hp else: fec = fedata.get("fec_inner") if fec: return fec return "" def createModulation(self, fedata): if fedata.get("tuner_type") == _("Terrestrial"): constellation = fedata.get("constellation") if constellation: return constellation else: modulation = fedata.get("modulation") if modulation: return modulation return "" def createTunerType(self, feraw): return feraw.get("tuner_type") or "" def createTunerSystem(self, fedata): return fedata.get("system") or "" def createOrbPos(self, feraw): orbpos = feraw.get("orbital_position") if orbpos > 1800: return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W" elif orbpos > 0: return str((float(orbpos)) / 10.0) + "\xc2\xb0 E" return "" def createOrbPosOrTunerSystem(self, fedata,feraw): orbpos = self.createOrbPos(feraw) if orbpos is not "": return orbpos return self.createTunerSystem(fedata) def createProviderName(self, info): return info.getInfoString(iServiceInformation.sProvider) @cached def getText(self): service = self.source.service if service is None: return "" info = service and service.info() if not info: return "" if self.type == "CryptoInfo": self.getCryptoInfo(info) if config.usage.show_cryptoinfo.value: return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info) else: return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info) if self.type == "CryptoBar": self.getCryptoInfo(info) return self.createCryptoBar(info) if self.type == "CryptoSpecial": self.getCryptoInfo(info) return self.createCryptoSpecial(info) if self.type == "ResolutionString": return self.createResolution(info) if self.type == "VideoCodec": return self.createVideoCodec(info) if self.updateFEdata: feinfo = service.frontendInfo() if feinfo: self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings") if self.feraw: self.fedata = ConvertToHumanReadable(self.feraw) feraw = self.feraw if not feraw: feraw = info.getInfoObject(iServiceInformation.sTransponderData) fedata = ConvertToHumanReadable(feraw) else: fedata = self.fedata if self.type == "All": self.getCryptoInfo(info) if config.usage.show_cryptoinfo.value: return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \ + addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n" \ + addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info) else: return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \ + addspace(self.createCryptoBar(info)) + self.current_source + "\n" \ + addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info) if self.type == "PIDInfo": return self.createPIDInfo(info) 
if not feraw: return "" if self.type == "ServiceInfo": return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \ + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) \ + addspace(self.createVideoCodec(info)) + self.createResolution(info) if self.type == "TransponderInfo": return self.createTransponderInfo(fedata, feraw) if self.type == "TransponderFrequency": return self.createFrequency(feraw) if self.type == "TransponderSymbolRate": return self.createSymbolRate(fedata, feraw) if self.type == "TransponderPolarization": return self.createPolarization(fedata) if self.type == "TransponderFEC": return self.createFEC(fedata, feraw) if self.type == "TransponderModulation": return self.createModulation(fedata) if self.type == "OrbitalPosition": return self.createOrbPos(feraw) if self.type == "TunerType": return self.createTunerType(feraw) if self.type == "TunerSystem": return self.createTunerSystem(fedata) if self.type == "OrbitalPositionOrTunerSystem": return self.createOrbPosOrTunerSystem(fedata,feraw) if self.type == "TerrestrialChannelNumber": return self.createChannelNumber(fedata, feraw) return _("invalid type") text = property(getText) @cached def getBool(self): service = self.source.service info = service and service.info() if not info: return False request_caid = None for x in self.ca_table: if x[0] == self.type: request_caid = x[1] request_selected = x[2] break if request_caid is None: return False if info.getInfo(iServiceInformation.sIsCrypted) != 1: return False data = self.ecmdata.getEcmData() if data is None: return False current_caid = data[1] available_caids = info.getInfoObject(iServiceInformation.sCAIDs) for caid_entry in self.caid_data: if caid_entry[3] == request_caid: if(request_selected): if int(current_caid, 16) >= int(caid_entry[0], 16) and int(current_caid, 16) <= int(caid_entry[1], 16): return True else: # request available try: for caid in available_caids: if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16): return True except: pass return False boolean = property(getBool) def changed(self, what): if what[0] == self.CHANGED_SPECIFIC: self.updateFEdata = False if what[1] == iPlayableService.evNewProgramInfo: self.updateFEdata = True if what[1] == iPlayableService.evEnd: self.feraw = self.fedata = None Converter.changed(self, what) elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None: self.updateFEdata = False Converter.changed(self, what)
gpl-2.0
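The caid_data table in the converter above maps CAID ranges (hex strings) to a display name, an abbreviation and a visibility flag; both createCryptoBar and getBool walk it to decide whether the currently decoded, or merely advertised, CAID falls inside a range. A minimal, self-contained sketch of that range test follows; caid_ranges and match_caid are illustrative names, not part of PliExtraInfo:

# Minimal sketch of the CAID range matching used by createCryptoBar/getBool.
# caid_ranges and match_caid are illustrative, not converter code.
caid_ranges = (
    ("0x100", "0x1ff", "Seca", "S"),
    ("0x1800", "0x18ff", "Nagra", "N"),
)

def match_caid(current_caid, ranges=caid_ranges):
    """Return the abbreviation of the range containing current_caid, or None."""
    value = int(current_caid, 16)
    for low, high, _name, abbrev in ranges:
        if int(low, 16) <= value <= int(high, 16):
            return abbrev
    return None

# e.g. match_caid("0x1802") -> "N", match_caid("0x2600") -> None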
infected-lp/kernel_sony_msm8974
scripts/build-all.py
1474
10189
#! /usr/bin/env python # Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. import glob from optparse import OptionParser import subprocess import os import os.path import re import shutil import sys version = 'build-all.py, version 0.01' build_dir = '../all-kernels' make_command = ["vmlinux", "modules", "dtbs"] make_env = os.environ make_env.update({ 'ARCH': 'arm', 'KCONFIG_NOTIMESTAMP': 'true' }) make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-') all_options = {} def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) defconfig = open(file, 'a') defconfig.write(str + '\n') defconfig.close() def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = {} arch_pats = ( r'[fm]sm[0-9]*_defconfig', r'apq*_defconfig', r'qsd*_defconfig', r'msmkrypton*_defconfig', ) for p in arch_pats: for n in glob.glob('arch/arm/configs/' + p): names[os.path.basename(n)[:-10]] = n return names class Builder: def __init__(self, logname): self.logname = logname self.fd = open(logname, 'w') def run(self, args): devnull = open('/dev/null', 'r') proc = subprocess.Popen(args, stdin=devnull, env=make_env, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) count = 0 # for line in proc.stdout: rawfd = proc.stdout.fileno() while True: line = os.read(rawfd, 1024) if not line: break self.fd.write(line) self.fd.flush() if 
all_options.verbose: sys.stdout.write(line) sys.stdout.flush() else: for i in range(line.count('\n')): count += 1 if count == 64: count = 0 print sys.stdout.write('.') sys.stdout.flush() print result = proc.wait() self.fd.close() return result failed_targets = [] def build(target): dest_dir = os.path.join(build_dir, target) log_name = '%s/log-%s.log' % (build_dir, target) print 'Building %s in %s log %s' % (target, dest_dir, log_name) if not os.path.isdir(dest_dir): os.mkdir(dest_dir) defconfig = 'arch/arm/configs/%s_defconfig' % target dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir shutil.copyfile(defconfig, dotconfig) staging_dir = 'install_staging' modi_dir = '%s' % staging_dir hdri_dir = '%s/usr' % staging_dir shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True) devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, '%s_defconfig' % target], env=make_env, stdin=devnull) devnull.close() if not all_options.updateconfigs: # Build targets can be dependent upon the completion of previous # build targets, so build them one at a time. cmd_line = ['make', 'INSTALL_HDR_PATH=%s' % hdri_dir, 'INSTALL_MOD_PATH=%s' % modi_dir, 'O=%s' % dest_dir] build_targets = [] for c in make_command: if re.match(r'^-{1,2}\w', c): cmd_line.append(c) else: build_targets.append(c) for t in build_targets: build = Builder(log_name) result = build.run(cmd_line + [t]) if result != 0: if all_options.keep_going: failed_targets.append(target) fail_or_error = error else: fail_or_error = fail fail_or_error("Failed to build %s, see %s" % (target, build.logname)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=make_env, stdin=devnull) devnull.close() shutil.copyfile(savedefconfig, defconfig) def build_many(allconf, targets): print "Building %d target(s)" % len(targets) for target in targets: if all_options.updateconfigs: update_config(allconf[target], all_options.updateconfigs) build(target) if failed_targets: fail('\n '.join(["Failed targets:"] + [target for target in failed_targets])) def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... -- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. 
--updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs.keys(): print " %s" % target sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if options.jobs: make_command.append("-j%d" % options.jobs) if options.load_average: make_command.append("-l%d" % options.load_average) if args == ['all']: build_many(configs, configs.keys()) elif args == ['perf']: targets = [] for t in configs.keys(): if "perf" in t: targets.append(t) build_many(configs, targets) elif args == ['noperf']: targets = [] for t in configs.keys(): if "perf" not in t: targets.append(t) build_many(configs, targets) elif len(args) > 0: targets = [] for t in args: if t not in configs.keys(): parser.error("Target '%s' not one of %s" % (t, configs.keys())) targets.append(t) build_many(configs, targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
gpl-2.0
pschmitt/home-assistant
homeassistant/components/supla/cover.py
6
2904
"""Support for Supla cover - curtains, rollershutters, entry gate etc.""" import logging from pprint import pformat from homeassistant.components.cover import ( ATTR_POSITION, DEVICE_CLASS_GARAGE, CoverEntity, ) from homeassistant.components.supla import SuplaChannel _LOGGER = logging.getLogger(__name__) SUPLA_SHUTTER = "CONTROLLINGTHEROLLERSHUTTER" SUPLA_GATE = "CONTROLLINGTHEGATE" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Supla covers.""" if discovery_info is None: return _LOGGER.debug("Discovery: %s", pformat(discovery_info)) entities = [] for device in discovery_info: device_name = device["function"]["name"] if device_name == SUPLA_SHUTTER: entities.append(SuplaCover(device)) elif device_name == SUPLA_GATE: entities.append(SuplaGateDoor(device)) add_entities(entities) class SuplaCover(SuplaChannel, CoverEntity): """Representation of a Supla Cover.""" @property def current_cover_position(self): """Return current position of cover. 0 is closed, 100 is open.""" state = self.channel_data.get("state") if state: return 100 - state["shut"] return None def set_cover_position(self, **kwargs): """Move the cover to a specific position.""" self.action("REVEAL", percentage=kwargs.get(ATTR_POSITION)) @property def is_closed(self): """Return if the cover is closed.""" if self.current_cover_position is None: return None return self.current_cover_position == 0 def open_cover(self, **kwargs): """Open the cover.""" self.action("REVEAL") def close_cover(self, **kwargs): """Close the cover.""" self.action("SHUT") def stop_cover(self, **kwargs): """Stop the cover.""" self.action("STOP") class SuplaGateDoor(SuplaChannel, CoverEntity): """Representation of a Supla gate door.""" @property def is_closed(self): """Return if the gate is closed or not.""" state = self.channel_data.get("state") if state and "hi" in state: return state.get("hi") return None def open_cover(self, **kwargs) -> None: """Open the gate.""" if self.is_closed: self.action("OPEN_CLOSE") def close_cover(self, **kwargs) -> None: """Close the gate.""" if not self.is_closed: self.action("OPEN_CLOSE") def stop_cover(self, **kwargs) -> None: """Stop the gate.""" self.action("OPEN_CLOSE") def toggle(self, **kwargs) -> None: """Toggle the gate.""" self.action("OPEN_CLOSE") @property def device_class(self): """Return the class of this device, from component DEVICE_CLASSES.""" return DEVICE_CLASS_GARAGE
apache-2.0
h2oai/h2o
py/testdir_single_jvm/test_KMeans_twit_fvec.py
9
2516
import unittest, time, sys sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_kmeans, h2o_browse as h2b, h2o_import as h2i class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(1, java_heap_GB=14) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_KMeans_twit_fvec(self): csvFilename = "Twitter2DB.txt" print "\nStarting", csvFilename # h2b.browseTheCloud() parseResult = h2i.import_parse(bucket='smalldata', path=csvFilename, hex_key=csvFilename + ".hex", schema='put') # both of these centers match what different R/Scikit packages get expected1 = [ # expected centers are from R. rest is just from h2o ([310527.2, 13433.89], 11340, None), ([5647967.1, 40487.76], 550, None), ([21765291.7, 93129.26], 14, None), ] # this is what we get with Furthest expected2 = [ ([351104.74065255735, 15421.749823633158], 11340, 5021682274541967.0) , ([7292636.589090909, 7575.630909090909], 550, 6373072701775582.0) , ([34406781.071428575, 244878.0], 14, 123310713697348.92) , ] # all are multipliers of expected tuple value allowedDelta = (0.0001, 0.0001, 0.0001) for trial in range(2): kwargs = { 'k': 3, 'max_iter': 50, 'normalize': 0, 'initialization': 'Furthest', # 'initialization': 'PlusPlus', 'destination_key': 'kmeans_dest_key', # reuse the same seed, to get deterministic results (otherwise sometimes fails 'seed': 265211114317615310 } init_choices = ['Furthest', 'PlusPlus'] kwargs['initialization'] = init_choices[trial % len(init_choices)] kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=15, **kwargs) # can't inspect a kmeans2 model? # inspect = h2o_cmd.runInspect(None, key=kmeans['model']['_key'], verbose=True) (centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvFilename, parseResult, 'd', **kwargs) h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected2, allowedDelta, trial=trial) if __name__ == '__main__': h2o.unit_main()
apache-2.0
bestwpw/mysql-5.6
xtrabackup/test/python/subunit/tests/test_subunit_filter.py
50
8259
# # subunit: extensions to python unittest to get test results from subprocesses. # Copyright (C) 2005 Robert Collins <[email protected]> # # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. # # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # license you chose for the specific language governing permissions and # limitations under that license. # """Tests for subunit.TestResultFilter.""" from datetime import datetime from subunit import iso8601 import unittest from testtools import TestCase from testtools.compat import _b, BytesIO, StringIO from testtools.testresult.doubles import ExtendedTestResult import subunit from subunit.test_results import TestResultFilter class TestTestResultFilter(TestCase): """Test for TestResultFilter, a TestResult object which filters tests.""" # While TestResultFilter works on python objects, using a subunit stream # is an easy pithy way of getting a series of test objects to call into # the TestResult, and as TestResultFilter is intended for use with subunit # also has the benefit of detecting any interface skew issues. example_subunit_stream = _b("""\ tags: global test passed success passed test failed tags: local failure failed test error error error [ error details ] test skipped skip skipped test todo xfail todo """) def run_tests(self, result_filter, input_stream=None): """Run tests through the given filter. :param result_filter: A filtering TestResult object. :param input_stream: Bytes of subunit stream data. If not provided, uses TestTestResultFilter.example_subunit_stream. """ if input_stream is None: input_stream = self.example_subunit_stream test = subunit.ProtocolTestCase(BytesIO(input_stream)) test.run(result_filter) def test_default(self): """The default is to exclude success and include everything else.""" filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result) self.run_tests(result_filter) # skips are seen as success by default python TestResult. self.assertEqual(['error'], [error[0].id() for error in filtered_result.errors]) self.assertEqual(['failed'], [failure[0].id() for failure in filtered_result.failures]) self.assertEqual(4, filtered_result.testsRun) def test_exclude_errors(self): filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result, filter_error=True) self.run_tests(result_filter) # skips are seen as errors by default python TestResult. 
self.assertEqual([], filtered_result.errors) self.assertEqual(['failed'], [failure[0].id() for failure in filtered_result.failures]) self.assertEqual(3, filtered_result.testsRun) def test_fixup_expected_failures(self): filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result, fixup_expected_failures=set(["failed"])) self.run_tests(result_filter) self.assertEqual(['failed', 'todo'], [failure[0].id() for failure in filtered_result.expectedFailures]) self.assertEqual([], filtered_result.failures) self.assertEqual(4, filtered_result.testsRun) def test_fixup_expected_errors(self): filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result, fixup_expected_failures=set(["error"])) self.run_tests(result_filter) self.assertEqual(['error', 'todo'], [failure[0].id() for failure in filtered_result.expectedFailures]) self.assertEqual([], filtered_result.errors) self.assertEqual(4, filtered_result.testsRun) def test_fixup_unexpected_success(self): filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result, filter_success=False, fixup_expected_failures=set(["passed"])) self.run_tests(result_filter) self.assertEqual(['passed'], [passed.id() for passed in filtered_result.unexpectedSuccesses]) self.assertEqual(5, filtered_result.testsRun) def test_exclude_failure(self): filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result, filter_failure=True) self.run_tests(result_filter) self.assertEqual(['error'], [error[0].id() for error in filtered_result.errors]) self.assertEqual([], [failure[0].id() for failure in filtered_result.failures]) self.assertEqual(3, filtered_result.testsRun) def test_exclude_skips(self): filtered_result = subunit.TestResultStats(None) result_filter = TestResultFilter(filtered_result, filter_skip=True) self.run_tests(result_filter) self.assertEqual(0, filtered_result.skipped_tests) self.assertEqual(2, filtered_result.failed_tests) self.assertEqual(3, filtered_result.testsRun) def test_include_success(self): """Successes can be included if requested.""" filtered_result = unittest.TestResult() result_filter = TestResultFilter(filtered_result, filter_success=False) self.run_tests(result_filter) self.assertEqual(['error'], [error[0].id() for error in filtered_result.errors]) self.assertEqual(['failed'], [failure[0].id() for failure in filtered_result.failures]) self.assertEqual(5, filtered_result.testsRun) def test_filter_predicate(self): """You can filter by predicate callbacks""" filtered_result = unittest.TestResult() def filter_cb(test, outcome, err, details): return outcome == 'success' result_filter = TestResultFilter(filtered_result, filter_predicate=filter_cb, filter_success=False) self.run_tests(result_filter) # Only success should pass self.assertEqual(1, filtered_result.testsRun) def test_time_ordering_preserved(self): # Passing a subunit stream through TestResultFilter preserves the # relative ordering of 'time' directives and any other subunit # directives that are still included. 
date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC) date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC) date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC) subunit_stream = _b('\n'.join([ "time: %s", "test: foo", "time: %s", "error: foo", "time: %s", ""]) % (date_a, date_b, date_c)) result = ExtendedTestResult() result_filter = TestResultFilter(result) self.run_tests(result_filter, subunit_stream) foo = subunit.RemotedTestCase('foo') self.assertEquals( [('time', date_a), ('startTest', foo), ('time', date_b), ('addError', foo, {}), ('stopTest', foo), ('time', date_c)], result._events) def test_skip_preserved(self): subunit_stream = _b('\n'.join([ "test: foo", "skip: foo", ""])) result = ExtendedTestResult() result_filter = TestResultFilter(result) self.run_tests(result_filter, subunit_stream) foo = subunit.RemotedTestCase('foo') self.assertEquals( [('startTest', foo), ('addSkip', foo, {}), ('stopTest', foo), ], result._events) def test_suite(): loader = subunit.tests.TestUtil.TestLoader() result = loader.loadTestsFromName(__name__) return result
gpl-2.0
ayoubg/gem5-graphics
Mesa-7.11.2_GPGPU-Sim/src/gallium/drivers/i965/brw_structs_dump.py
34
8917
#!/usr/bin/env python ''' Generates dumpers for the i965 state strucutures using pygccxml. Run as PYTHONPATH=/path/to/pygccxml-1.0.0 python brw_structs_dump.py Jose Fonseca <[email protected]> ''' copyright = ''' /************************************************************************** * * Copyright 2009 VMware, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * **************************************************************************/ ''' import os import sys import re from pygccxml import parser from pygccxml import declarations from pygccxml.declarations import algorithm from pygccxml.declarations import decl_visitor from pygccxml.declarations import type_traits from pygccxml.declarations import type_visitor enums = True def vars_filter(variable): name = variable.name return not re.match('^pad\d*', name) and name != 'dword' class decl_dumper_t(decl_visitor.decl_visitor_t): def __init__(self, stream, instance = '', decl = None): decl_visitor.decl_visitor_t.__init__(self) self.stream = stream self._instance = instance self.decl = decl def clone(self): return decl_dumper_t(self.stream, self._instance, self.decl) def visit_class(self): class_ = self.decl assert self.decl.class_type in ('struct', 'union') for variable in class_.variables(recursive = False): if vars_filter(variable): dump_type(self.stream, self._instance + '.' 
+ variable.name, variable.type) def visit_enumeration(self): if enums: self.stream.write(' switch(%s) {\n' % ("(*ptr)" + self._instance,)) for name, value in self.decl.values: self.stream.write(' case %s:\n' % (name,)) self.stream.write(' debug_printf("\\t\\t%s = %s\\n");\n' % (self._instance, name)) self.stream.write(' break;\n') self.stream.write(' default:\n') self.stream.write(' debug_printf("\\t\\t%s = %%i\\n", %s);\n' % (self._instance, "(*ptr)" + self._instance)) self.stream.write(' break;\n') self.stream.write(' }\n') else: self.stream.write(' debug_printf("\\t\\t%s = %%i\\n", %s);\n' % (self._instance, "(*ptr)" + self._instance)) def dump_decl(stream, instance, decl): dumper = decl_dumper_t(stream, instance, decl) algorithm.apply_visitor(dumper, decl) class type_dumper_t(type_visitor.type_visitor_t): def __init__(self, stream, instance, type_): type_visitor.type_visitor_t.__init__(self) self.stream = stream self.instance = instance self.type = type_ def clone(self): return type_dumper_t(self.instance, self.type) def visit_bool(self): self.print_instance('%i') def visit_char(self): #self.print_instance('%i') self.print_instance('0x%x') def visit_unsigned_char(self): #self.print_instance('%u') self.print_instance('0x%x') def visit_signed_char(self): #self.print_instance('%i') self.print_instance('0x%x') def visit_wchar(self): self.print_instance('0x%x') def visit_short_int(self): #self.print_instance('%i') self.print_instance('0x%x') def visit_short_unsigned_int(self): #self.print_instance('%u') self.print_instance('0x%x') def visit_int(self): #self.print_instance('%i') self.print_instance('0x%x') def visit_unsigned_int(self): #self.print_instance('%u') self.print_instance('0x%x') def visit_long_int(self): #self.print_instance('%li') self.print_instance('0x%lx') def visit_long_unsigned_int(self): #self.print_instance('%lu') self.print_instance('%0xlx') def visit_long_long_int(self): #self.print_instance('%lli') self.print_instance('%0xllx') def visit_long_long_unsigned_int(self): #self.print_instance('%llu') self.print_instance('0x%llx') def visit_float(self): self.print_instance('%f') def visit_double(self): self.print_instance('%f') def visit_array(self): for i in range(type_traits.array_size(self.type)): dump_type(self.stream, self.instance + '[%i]' % i, type_traits.base_type(self.type)) def visit_pointer(self): self.print_instance('%p') def visit_declarated(self): #stream.write('decl = %r\n' % self.type.decl_string) decl = type_traits.remove_declarated(self.type) dump_decl(self.stream, self.instance, decl) def print_instance(self, format): self.stream.write(' debug_printf("\\t\\t%s = %s\\n", %s);\n' % (self.instance, format, "(*ptr)" + self.instance)) def dump_type(stream, instance, type_): type_ = type_traits.remove_alias(type_) visitor = type_dumper_t(stream, instance, type_) algorithm.apply_visitor(visitor, type_) def dump_struct_interface(stream, class_, suffix = ';'): name = class_.name assert name.startswith('brw_'); name = name[:4] + 'dump_' + name[4:] stream.write('void\n') stream.write('%s(const struct %s *ptr)%s\n' % (name, class_.name, suffix)) def dump_struct_implementation(stream, decls, class_): dump_struct_interface(stream, class_, suffix = '') stream.write('{\n') dump_decl(stream, '', class_) stream.write('}\n') stream.write('\n') def dump_header(stream): stream.write(copyright.strip() + '\n') stream.write('\n') stream.write('/**\n') stream.write(' * @file\n') stream.write(' * Dump i965 data structures.\n') stream.write(' *\n') stream.write(' * Generated 
automatically from brw_structs.h by brw_structs_dump.py.\n') stream.write(' */\n') stream.write('\n') def dump_interfaces(decls, global_ns, names): stream = open('brw_structs_dump.h', 'wt') dump_header(stream) stream.write('#ifndef BRW_STRUCTS_DUMP_H\n') stream.write('#define BRW_STRUCTS_DUMP_H\n') stream.write('\n') for name in names: stream.write('struct %s;\n' % (name,)) stream.write('\n') for name in names: (class_,) = global_ns.classes(name = name) dump_struct_interface(stream, class_) stream.write('\n') stream.write('\n') stream.write('#endif /* BRW_STRUCTS_DUMP_H */\n') def dump_implementations(decls, global_ns, names): stream = open('brw_structs_dump.c', 'wt') dump_header(stream) stream.write('#include "util/u_debug.h"\n') stream.write('\n') stream.write('#include "brw_types.h"\n') stream.write('#include "brw_structs.h"\n') stream.write('#include "brw_structs_dump.h"\n') stream.write('\n') for name in names: (class_,) = global_ns.classes(name = name) dump_struct_implementation(stream, decls, class_) def decl_filter(decl): '''Filter the declarations we're interested in''' name = decl.name return name.startswith('brw_') and name not in ('brw_instruction',) def main(): config = parser.config_t( include_paths = [ '../../include', ], compiler = 'gcc', ) headers = [ 'brw_types.h', 'brw_structs.h', ] decls = parser.parse(headers, config, parser.COMPILATION_MODE.ALL_AT_ONCE) global_ns = declarations.get_global_namespace(decls) names = [] for class_ in global_ns.classes(decl_filter): names.append(class_.name) names.sort() dump_interfaces(decls, global_ns, names) dump_implementations(decls, global_ns, names) if __name__ == '__main__': main()
bsd-3-clause
Beauhurst/django
tests/auth_tests/test_basic.py
31
5389
from django.contrib.auth import get_user, get_user_model from django.contrib.auth.models import AnonymousUser, User from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.http import HttpRequest from django.test import TestCase, override_settings from django.utils import translation from .models import CustomUser class BasicTestCase(TestCase): def test_user(self): "Users can be created and can set their password" u = User.objects.create_user('testuser', '[email protected]', 'testpw') self.assertTrue(u.has_usable_password()) self.assertFalse(u.check_password('bad')) self.assertTrue(u.check_password('testpw')) # Check we can manually set an unusable password u.set_unusable_password() u.save() self.assertFalse(u.check_password('testpw')) self.assertFalse(u.has_usable_password()) u.set_password('testpw') self.assertTrue(u.check_password('testpw')) u.set_password(None) self.assertFalse(u.has_usable_password()) # Check username getter self.assertEqual(u.get_username(), 'testuser') # Check authentication/permissions self.assertFalse(u.is_anonymous) self.assertTrue(u.is_authenticated) self.assertFalse(u.is_staff) self.assertTrue(u.is_active) self.assertFalse(u.is_superuser) # Check API-based user creation with no password u2 = User.objects.create_user('testuser2', '[email protected]') self.assertFalse(u2.has_usable_password()) def test_unicode_username(self): User.objects.create_user('jörg') User.objects.create_user('Григорий') # Two equivalent unicode normalized usernames should be duplicates omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA ohm_username = 'iamtheΩ' # U+2126 OHM SIGN User.objects.create_user(ohm_username) with self.assertRaises(IntegrityError): User.objects.create_user(omega_username) def test_user_no_email(self): "Users can be created without an email" u = User.objects.create_user('testuser1') self.assertEqual(u.email, '') u2 = User.objects.create_user('testuser2', email='') self.assertEqual(u2.email, '') u3 = User.objects.create_user('testuser3', email=None) self.assertEqual(u3.email, '') def test_anonymous_user(self): "Check the properties of the anonymous user" a = AnonymousUser() self.assertIsNone(a.pk) self.assertEqual(a.username, '') self.assertEqual(a.get_username(), '') self.assertTrue(a.is_anonymous) self.assertFalse(a.is_authenticated) self.assertFalse(a.is_staff) self.assertFalse(a.is_active) self.assertFalse(a.is_superuser) self.assertEqual(a.groups.all().count(), 0) self.assertEqual(a.user_permissions.all().count(), 0) def test_superuser(self): "Check the creation and properties of a superuser" super = User.objects.create_superuser('super', '[email protected]', 'super') self.assertTrue(super.is_superuser) self.assertTrue(super.is_active) self.assertTrue(super.is_staff) def test_get_user_model(self): "The current user model can be retrieved" self.assertEqual(get_user_model(), User) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser') def test_swappable_user(self): "The current user model can be swapped out for another" self.assertEqual(get_user_model(), CustomUser) with self.assertRaises(AttributeError): User.objects.all() @override_settings(AUTH_USER_MODEL='badsetting') def test_swappable_user_bad_setting(self): "The alternate user setting must point to something in the format app.model" with self.assertRaises(ImproperlyConfigured): get_user_model() @override_settings(AUTH_USER_MODEL='thismodel.doesntexist') def test_swappable_user_nonexistent_model(self): "The current user model must point to an 
installed model" with self.assertRaises(ImproperlyConfigured): get_user_model() def test_user_verbose_names_translatable(self): "Default User model verbose names are translatable (#19945)" with translation.override('en'): self.assertEqual(User._meta.verbose_name, 'user') self.assertEqual(User._meta.verbose_name_plural, 'users') with translation.override('es'): self.assertEqual(User._meta.verbose_name, 'usuario') self.assertEqual(User._meta.verbose_name_plural, 'usuarios') class TestGetUser(TestCase): def test_get_user_anonymous(self): request = HttpRequest() request.session = self.client.session user = get_user(request) self.assertIsInstance(user, AnonymousUser) def test_get_user(self): created_user = User.objects.create_user('testuser', '[email protected]', 'testpw') self.client.login(username='testuser', password='testpw') request = HttpRequest() request.session = self.client.session user = get_user(request) self.assertIsInstance(user, User) self.assertEqual(user.username, created_user.username)
bsd-3-clause
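test_unicode_username in the file above relies on the two omega characters (U+03A9 GREEK CAPITAL LETTER OMEGA and U+2126 OHM SIGN) collapsing to the same stored username. Assuming the duplicate comes from Unicode NFKC normalization of usernames, the collision can be checked directly with the standard library; this is an illustrative check, not part of the test suite:

# Sketch of why 'iamthe\u03a9' and 'iamthe\u2126' collide after normalization.
# Assumes usernames are NFKC-normalized before being stored/compared.
import unicodedata

omega_username = 'iamthe\u03a9'  # GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamthe\u2126'    # OHM SIGN

assert omega_username != ohm_username
assert (unicodedata.normalize('NFKC', omega_username)
        == unicodedata.normalize('NFKC', ohm_username))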
unho/pootle
pootle/apps/pootle_store/migrations/0025_unit_on_delete_user.py
7
1163
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-02-02 14:03 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import pootle.core.user class Migration(migrations.Migration): dependencies = [ ('pootle_store', '0024_set_store_base_manager_name'), ] operations = [ migrations.AlterField( model_name='unit', name='commented_by', field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='commented', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='unit', name='reviewed_by', field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='reviewed', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='unit', name='submitted_by', field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='submitted', to=settings.AUTH_USER_MODEL), ), ]
gpl-3.0
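The migration above switches the three Unit author fields to on_delete=models.SET(pootle.core.user.get_system_user), meaning that deleting a user reassigns their units to whatever that callable returns instead of cascading or nulling. A minimal sketch of the same pattern on a hypothetical model follows; Comment and get_fallback_user are illustrative names, not Pootle code:

# Sketch of ForeignKey(on_delete=models.SET(callable)): when the referenced
# user row is deleted, Django sets the FK to the callable's return value.
from django.conf import settings
from django.db import models


def get_fallback_user():
    # Hypothetical stand-in for pootle.core.user.get_system_user.
    from django.contrib.auth import get_user_model
    return get_user_model().objects.get(username='system')


class Comment(models.Model):  # hypothetical model, for illustration only
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        on_delete=models.SET(get_fallback_user),
        related_name='comments',
    )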
ajaybhat/scikit-image
skimage/measure/tests/test_fit.py
6
10401
import numpy as np from numpy.testing import assert_equal, assert_raises, assert_almost_equal from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac from skimage.transform import AffineTransform from skimage.measure.fit import _dynamic_max_trials from skimage._shared._warnings import expected_warnings def test_line_model_invalid_input(): assert_raises(ValueError, LineModelND().estimate, np.empty((1, 3))) def test_line_model_predict(): model = LineModelND() model.params = ((0, 0), (1, 1)) x = np.arange(-10, 10) y = model.predict_y(x) assert_almost_equal(x, model.predict_x(y)) def test_line_model_estimate(): # generate original data without noise model0 = LineModelND() model0.params = ((0, 0), (1, 1)) x0 = np.arange(-100, 100) y0 = model0.predict_y(x0) data = np.column_stack([x0, y0]) # estimate parameters of noisy data model_est = LineModelND() model_est.estimate(data) # test whether estimated parameters almost equal original parameters random_state = np.random.RandomState(1234) x = random_state.rand(100, 2) assert_almost_equal(model0.predict(x), model_est.predict(x), 1) def test_line_model_residuals(): model = LineModelND() model.params = (np.array([0, 0]), np.array([0, 1])) assert_equal(model.residuals(np.array([[0, 0]])), 0) assert_equal(model.residuals(np.array([[0, 10]])), 0) assert_equal(model.residuals(np.array([[10, 0]])), 10) model.params = (np.array([-2, 0]), np.array([1, 1]) / np.sqrt(2)) assert_equal(model.residuals(np.array([[0, 0]])), np.sqrt(2)) assert_almost_equal(model.residuals(np.array([[-4, 0]])), np.sqrt(2)) def test_line_model_under_determined(): data = np.empty((1, 2)) assert_raises(ValueError, LineModelND().estimate, data) def test_line_modelND_invalid_input(): assert_raises(ValueError, LineModelND().estimate, np.empty((5, 1))) def test_line_modelND_predict(): model = LineModelND() model.params = (np.array([0, 0]), np.array([0.2, 0.98])) x = np.arange(-10, 10) y = model.predict_y(x) assert_almost_equal(x, model.predict_x(y)) def test_line_modelND_estimate(): # generate original data without noise model0 = LineModelND() model0.params = (np.array([0,0,0], dtype='float'), np.array([1,1,1], dtype='float')/np.sqrt(3)) # we scale the unit vector with a factor 10 when generating points on the # line in order to compensate for the scale of the random noise data0 = (model0.params[0] + 10 * np.arange(-100,100)[...,np.newaxis] * model0.params[1]) # add gaussian noise to data random_state = np.random.RandomState(1234) data = data0 + random_state.normal(size=data0.shape) # estimate parameters of noisy data model_est = LineModelND() model_est.estimate(data) # test whether estimated parameters are correct # we use the following geometric property: two aligned vectors have # a cross-product equal to zero # test if direction vectors are aligned assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], model_est.params[1])), 0, 1) # test if origins are aligned with the direction a = model_est.params[0] - model0.params[0] if np.linalg.norm(a) > 0: a /= np.linalg.norm(a) assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1) def test_line_modelND_residuals(): model = LineModelND() model.params = (np.array([0, 0, 0]), np.array([0, 0, 1])) assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0) assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0) assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10) def test_line_modelND_under_determined(): data = np.empty((1, 3)) assert_raises(ValueError, LineModelND().estimate, data) def 
test_circle_model_invalid_input(): assert_raises(ValueError, CircleModel().estimate, np.empty((5, 3))) def test_circle_model_predict(): model = CircleModel() r = 5 model.params = (0, 0, r) t = np.arange(0, 2 * np.pi, np.pi / 2) xy = np.array(((5, 0), (0, 5), (-5, 0), (0, -5))) assert_almost_equal(xy, model.predict_xy(t)) def test_circle_model_estimate(): # generate original data without noise model0 = CircleModel() model0.params = (10, 12, 3) t = np.linspace(0, 2 * np.pi, 1000) data0 = model0.predict_xy(t) # add gaussian noise to data random_state = np.random.RandomState(1234) data = data0 + random_state.normal(size=data0.shape) # estimate parameters of noisy data model_est = CircleModel() model_est.estimate(data) # test whether estimated parameters almost equal original parameters assert_almost_equal(model0.params, model_est.params, 1) def test_circle_model_residuals(): model = CircleModel() model.params = (0, 0, 5) assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0) assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))), np.sqrt(2 * 6**2) - 5) assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5) def test_ellipse_model_invalid_input(): assert_raises(ValueError, EllipseModel().estimate, np.empty((5, 3))) def test_ellipse_model_predict(): model = EllipseModel() r = 5 model.params = (0, 0, 5, 10, 0) t = np.arange(0, 2 * np.pi, np.pi / 2) xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10))) assert_almost_equal(xy, model.predict_xy(t)) def test_ellipse_model_estimate(): # generate original data without noise model0 = EllipseModel() model0.params = (10, 20, 15, 25, 0) t = np.linspace(0, 2 * np.pi, 100) data0 = model0.predict_xy(t) # add gaussian noise to data random_state = np.random.RandomState(1234) data = data0 + random_state.normal(size=data0.shape) # estimate parameters of noisy data model_est = EllipseModel() model_est.estimate(data) # test whether estimated parameters almost equal original parameters assert_almost_equal(model0.params, model_est.params, 0) def test_ellipse_model_residuals(): model = EllipseModel() # vertical line through origin model.params = (0, 0, 10, 5, 0) assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0) assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0) assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5) def test_ransac_shape(): # generate original data without noise model0 = CircleModel() model0.params = (10, 12, 3) t = np.linspace(0, 2 * np.pi, 1000) data0 = model0.predict_xy(t) # add some faulty data outliers = (10, 30, 200) data0[outliers[0], :] = (1000, 1000) data0[outliers[1], :] = (-50, 50) data0[outliers[2], :] = (-100, -10) # estimate parameters of corrupted data model_est, inliers = ransac(data0, CircleModel, 3, 5, random_state=1) # test whether estimated parameters equal original parameters assert_equal(model0.params, model_est.params) for outlier in outliers: assert outlier not in inliers def test_ransac_geometric(): random_state = np.random.RandomState(1) # generate original data without noise src = 100 * random_state.random_sample((50, 2)) model0 = AffineTransform(scale=(0.5, 0.3), rotation=1, translation=(10, 20)) dst = model0(src) # add some faulty data outliers = (0, 5, 20) dst[outliers[0]] = (10000, 10000) dst[outliers[1]] = (-100, 100) dst[outliers[2]] = (50, 50) # estimate parameters of corrupted data model_est, inliers = ransac((src, dst), AffineTransform, 2, 20, random_state=random_state) # test whether estimated parameters equal original parameters 
assert_almost_equal(model0.params, model_est.params) assert np.all(np.nonzero(inliers == False)[0] == outliers) def test_ransac_is_data_valid(): is_data_valid = lambda data: data.shape[0] > 2 model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf, is_data_valid=is_data_valid, random_state=1) assert_equal(model, None) assert_equal(inliers, None) def test_ransac_is_model_valid(): def is_model_valid(model, data): return False model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf, is_model_valid=is_model_valid, random_state=1) assert_equal(model, None) assert_equal(inliers, None) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 5 assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 5, 1), np.inf) def test_ransac_invalid_input(): assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2, residual_threshold=0, max_trials=-1) assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2, residual_threshold=0, stop_probability=-1) assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2, residual_threshold=0, stop_probability=1.01) if __name__ == "__main__": np.testing.run_module_suite()
bsd-3-clause
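The hand-calculated values in test_ransac_dynamic_max_trials above follow the standard RANSAC trial-count formula N = log(1 - p) / log(1 - w^s), where w is the inlier ratio, s the minimal sample size and p the desired probability of drawing at least one outlier-free sample. The sketch below is an illustrative re-derivation that reproduces two of the table entries; it is not skimage's actual _dynamic_max_trials implementation:

# Illustrative recomputation of the expected trial counts; not the actual
# skimage.measure.fit._dynamic_max_trials implementation.
import math

def max_trials(n_inliers, n_samples, min_samples, probability):
    w = n_inliers / float(n_samples)      # inlier ratio
    good_sample = w ** min_samples        # chance one random draw is all-inlier
    if probability == 0:
        return 0
    if good_sample == 0 or probability == 1:
        return float('inf')
    return int(math.ceil(math.log(1 - probability) / math.log(1 - good_sample)))

assert max_trials(50, 100, 2, 0.99) == 17     # e = 50%, min_samples = 2
assert max_trials(50, 100, 8, 0.99) == 1177   # e = 50%, min_samples = 8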
GotlingSystem/apnea
src/apps/dive_log/models.py
1
2496
# coding=utf-8 from django.contrib.auth.models import User from django.db import models from django.utils.translation import ugettext as _ from discipline.models import Discipline class Session(models.Model): #pool = models.ForeignKey(Pool) user = models.ForeignKey(User) date = models.DateField(verbose_name=_(u'Datum')) time = models.TimeField(verbose_name=_(u'Tid')) comment = models.CharField(verbose_name=_(u'Kommentar'), max_length=512, blank=True) class Meta: verbose_name = _(u'Session') verbose_name_plural = _(u'Sessioner') ordering = ['-date', '-time'] def __unicode__(self): return "{} {}".format(self.date, self.time) class Dive(models.Model): session = models.ForeignKey(Session) discipline = models.ForeignKey(Discipline, verbose_name=_(u'Disciplin'), null=True, blank=True) rest_duration = models.DurationField(_(u'Vila'), null=True, blank=True) start = models.TimeField(null=True, blank=True) dive_duration = models.DurationField(_(u'Dyktid'), null=True, blank=True) distance = models.IntegerField(_(u'Distans'), help_text=_(u'i meter'), null=True) temperature = models.IntegerField(_(u'Temperatur'), help_text=_(u'i celsius'), null=True, blank=True) comment = models.CharField(_(u'Kommentar'), max_length=512, blank=True) # TODO: Tag migrations broken with Django 1.7.2 https://github.com/alex/django-taggit/issues/285 #tags = TaggableManager(verbose_name=_(u'Taggar'), blank=True) class Meta: verbose_name = _(u'Dyk') verbose_name_plural = _(u'Dyk') ordering = ['id'] def __unicode__(self): if self.discipline: return "{} - {}".format(self.discipline.abbreviation, str(self.dive_duration)) else: return str(self.dive_duration) class DataPoint(models.Model): dive = models.ForeignKey(Dive) second = models.IntegerField(verbose_name=_(u'Sekund')) depth = models.DecimalField(verbose_name=_(u'Djup'), decimal_places=1, max_digits=4, null=True, blank=True) temperature = models.DecimalField(verbose_name=_(u'Temperatur'), decimal_places=1, max_digits=3, null=True, blank=True) heart_rate = models.IntegerField(verbose_name=_(u'Puls'), null=True, blank=True) class Meta: verbose_name = _(u'Datapunkt') verbose_name_plural = _(u'Datapunkter') ordering = ['second'] def __unicode__(self): return u'{} - {} m'.format(self.second, self.depth)
mit
ThiagoGarciaAlves/intellij-community
python/lib/Lib/site-packages/django/conf/locale/ka/formats.py
329
1888
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'l, j F, Y' TIME_FORMAT = 'h:i:s a' DATETIME_FORMAT = 'j F, Y h:i:s a' YEAR_MONTH_FORMAT = 'F, Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'j.M.Y' SHORT_DATETIME_FORMAT = 'j.M.Y H:i:s' FIRST_DAY_OF_WEEK = 1 # (Monday) # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06' # '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006' # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' # '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M', # '10/25/06 14:30' '%m/%d/%y', # '10/25/06' ) DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = " " NUMBER_GROUPING = 3
apache-2.0
johankaito/fufuka
microblog/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py
2763
12628
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: Latin7_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) win1253_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 
30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) # Model Table: # total sequences: 100% # first 512 sequences: 98.2851% # first 1024 sequences:1.7001% # rest sequences: 0.0359% # negative sequences: 0.0148% GreekLangModel = ( 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0, 2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0, 2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0, 2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0, 0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0, 3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0, 2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0, 0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0, 0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2, 0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0, 0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2, 0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0, 0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2, 0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2, 0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0, 0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0, 0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0, 0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2, 0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0, 0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2, 0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0, 0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0, 0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1, 0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0, 0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0, 0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) Latin7GreekModel = { 'charToOrderMap': Latin7_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-7" } Win1253GreekModel = { 'charToOrderMap': win1253_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "windows-1253" } # flake8: noqa
apache-2.0
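The char-to-order maps and GreekLangModel above feed chardet's single-byte charset probers. The following is a minimal illustrative sketch of how such a prober can consume these tables, assuming the names Latin7_CharToOrderMap and GreekLangModel from the record above are in scope; the 64-letter sample window and the 0-3 likelihood categories follow the chardet convention, and this is not the library's actual code.

SAMPLE_SIZE = 64  # only the 64 most frequent letters take part in 2-gram sequences

def count_sequence_categories(data, char_to_order_map, precedence_matrix):
    """Tally 2-gram likelihood categories (0 = negative .. 3 = positive)."""
    counts = [0, 0, 0, 0]
    last_order = 255
    for byte in bytearray(data):
        order = char_to_order_map[byte]
        if order < SAMPLE_SIZE and last_order < SAMPLE_SIZE:
            category = precedence_matrix[last_order * SAMPLE_SIZE + order]
            counts[category] += 1
        last_order = order
    return counts

# e.g. counts = count_sequence_categories(some_bytes, Latin7_CharToOrderMap, GreekLangModel)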
openstack/heat
heat/tests/engine/test_engine_worker.py
1
12872
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from heat.db.sqlalchemy import api as db_api from heat.engine import check_resource from heat.engine import stack as parser from heat.engine import template as templatem from heat.engine import worker from heat.objects import stack as stack_objects from heat.rpc import worker_client as wc from heat.tests import common from heat.tests import utils class WorkerServiceTest(common.HeatTestCase): def test_make_sure_rpc_version(self): self.assertEqual( '1.4', worker.WorkerService.RPC_API_VERSION, ('RPC version is changed, please update this test to new version ' 'and make sure additional test cases are added for RPC APIs ' 'added in new version')) @mock.patch('heat.common.messaging.get_rpc_server', return_value=mock.Mock()) @mock.patch('oslo_messaging.Target', return_value=mock.Mock()) @mock.patch('heat.rpc.worker_client.WorkerClient', return_value=mock.Mock()) def test_service_start(self, rpc_client_class, target_class, rpc_server_method ): self.worker = worker.WorkerService('host-1', 'topic-1', 'engine_id', mock.Mock()) self.worker.start() # Make sure target is called with proper parameters target_class.assert_called_once_with( version=worker.WorkerService.RPC_API_VERSION, server=self.worker.engine_id, topic=self.worker.topic) # Make sure rpc server creation with proper target # and WorkerService is initialized with it target = target_class.return_value rpc_server_method.assert_called_once_with(target, self.worker) rpc_server = rpc_server_method.return_value self.assertEqual(rpc_server, self.worker._rpc_server, "Failed to create RPC server") # Make sure rpc server is started. 
rpc_server.start.assert_called_once_with() # Make sure rpc client is created and initialized in WorkerService rpc_client = rpc_client_class.return_value rpc_client_class.assert_called_once_with() self.assertEqual(rpc_client, self.worker._rpc_client, "Failed to create RPC client") def test_service_stop(self): self.worker = worker.WorkerService('host-1', 'topic-1', 'engine_id', mock.Mock()) with mock.patch.object(self.worker, '_rpc_server') as mock_rpc_server: self.worker.stop() mock_rpc_server.stop.assert_called_once_with() mock_rpc_server.wait.assert_called_once_with() @mock.patch.object(check_resource, 'load_resource') @mock.patch.object(check_resource.CheckResource, 'check') def test_check_resource_adds_and_removes_msg_queue(self, mock_check, mock_load_resource): mock_tgm = mock.MagicMock() mock_tgm.add_msg_queue = mock.Mock(return_value=None) mock_tgm.remove_msg_queue = mock.Mock(return_value=None) self.worker = worker.WorkerService('host-1', 'topic-1', 'engine_id', mock_tgm) ctx = utils.dummy_context() current_traversal = 'something' fake_res = mock.MagicMock() fake_res.current_traversal = current_traversal mock_load_resource.return_value = (fake_res, fake_res, fake_res) self.worker.check_resource(ctx, mock.Mock(), current_traversal, {}, mock.Mock(), mock.Mock()) self.assertTrue(mock_tgm.add_msg_queue.called) self.assertTrue(mock_tgm.remove_msg_queue.called) @mock.patch.object(check_resource, 'load_resource') @mock.patch.object(check_resource.CheckResource, 'check') def test_check_resource_adds_and_removes_msg_queue_on_exception( self, mock_check, mock_load_resource): # even if the check fails; the message should be removed mock_tgm = mock.MagicMock() mock_tgm.add_msg_queue = mock.Mock(return_value=None) mock_tgm.remove_msg_queue = mock.Mock(return_value=None) self.worker = worker.WorkerService('host-1', 'topic-1', 'engine_id', mock_tgm) ctx = utils.dummy_context() current_traversal = 'something' fake_res = mock.MagicMock() fake_res.current_traversal = current_traversal mock_load_resource.return_value = (fake_res, fake_res, fake_res) mock_check.side_effect = BaseException self.assertRaises(BaseException, self.worker.check_resource, ctx, mock.Mock(), current_traversal, {}, mock.Mock(), mock.Mock()) self.assertTrue(mock_tgm.add_msg_queue.called) # ensure remove is also called self.assertTrue(mock_tgm.remove_msg_queue.called) @mock.patch.object(worker, '_wait_for_cancellation') @mock.patch.object(worker, '_cancel_check_resource') @mock.patch.object(wc.WorkerClient, 'cancel_check_resource') @mock.patch.object(db_api, 'engine_get_all_locked_by_stack') def test_cancel_workers_when_no_resource_found(self, mock_get_locked, mock_ccr, mock_wccr, mock_wc): mock_tgm = mock.Mock() _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001', mock_tgm) stack = mock.MagicMock() stack.id = 'stack_id' mock_get_locked.return_value = [] worker._cancel_workers(stack, mock_tgm, 'engine-001', _worker._rpc_client) self.assertFalse(mock_wccr.called) self.assertFalse(mock_ccr.called) @mock.patch.object(worker, '_wait_for_cancellation') @mock.patch.object(worker, '_cancel_check_resource') @mock.patch.object(wc.WorkerClient, 'cancel_check_resource') @mock.patch.object(db_api, 'engine_get_all_locked_by_stack') def test_cancel_workers_with_resources_found(self, mock_get_locked, mock_ccr, mock_wccr, mock_wc): mock_tgm = mock.Mock() _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001', mock_tgm) stack = mock.MagicMock() stack.id = 'stack_id' mock_get_locked.return_value = ['engine-001', 'engine-007', 
'engine-008'] worker._cancel_workers(stack, mock_tgm, 'engine-001', _worker._rpc_client) mock_wccr.assert_called_once_with(stack.id, 'engine-001', mock_tgm) self.assertEqual(2, mock_ccr.call_count) calls = [mock.call(stack.context, stack.id, 'engine-007'), mock.call(stack.context, stack.id, 'engine-008')] mock_ccr.assert_has_calls(calls, any_order=True) self.assertTrue(mock_wc.called) @mock.patch.object(worker, '_stop_traversal') def test_stop_traversal_stops_nested_stack(self, mock_st): mock_tgm = mock.Mock() ctx = utils.dummy_context() tmpl = templatem.Template.create_empty_template() stack1 = parser.Stack(ctx, 'stack1', tmpl, current_traversal='123') stack1.store() stack2 = parser.Stack(ctx, 'stack2', tmpl, owner_id=stack1.id, current_traversal='456') stack2.store() _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001', mock_tgm) _worker.stop_traversal(stack1) self.assertEqual(2, mock_st.call_count) call1, call2 = mock_st.call_args_list call_args1, call_args2 = call1[0][0], call2[0][0] self.assertEqual('stack1', call_args1.name) self.assertEqual('stack2', call_args2.name) @mock.patch.object(worker, '_stop_traversal') def test_stop_nested_traversal_stops_deeply_nested_stack(self, mock_st): mock_tgm = mock.Mock() ctx = utils.dummy_context() tmpl = templatem.Template.create_empty_template() stack1 = parser.Stack(ctx, 'stack1', tmpl, current_traversal='123') stack1.store() stack2 = parser.Stack(ctx, 'stack2', tmpl, owner_id=stack1.id, current_traversal='456') stack2.store() stack3 = parser.Stack(ctx, 'stack3', tmpl, owner_id=stack2.id, current_traversal='789') stack3.store() _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001', mock_tgm) _worker.stop_traversal(stack2) self.assertEqual(2, mock_st.call_count) call1, call2 = mock_st.call_args_list call_args1, call_args2 = call1[0][0], call2[0][0] self.assertEqual('stack2', call_args1.name) self.assertEqual('stack3', call_args2.name) @mock.patch.object(worker, '_cancel_workers') @mock.patch.object(worker.WorkerService, 'stop_traversal') def test_stop_all_workers_when_stack_in_progress(self, mock_st, mock_cw): mock_tgm = mock.Mock() _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001', mock_tgm) stack = mock.MagicMock() stack.IN_PROGRESS = 'IN_PROGRESS' stack.status = stack.IN_PROGRESS stack.id = 'stack_id' stack.rollback = mock.MagicMock() _worker.stop_all_workers(stack) mock_st.assert_not_called() mock_cw.assert_called_once_with(stack, mock_tgm, 'engine-001', _worker._rpc_client) self.assertFalse(stack.rollback.called) @mock.patch.object(worker, '_cancel_workers') @mock.patch.object(worker.WorkerService, 'stop_traversal') def test_stop_all_workers_when_stack_not_in_progress(self, mock_st, mock_cw): mock_tgm = mock.Mock() _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001', mock_tgm) stack = mock.MagicMock() stack.FAILED = 'FAILED' stack.status = stack.FAILED stack.id = 'stack_id' stack.rollback = mock.MagicMock() _worker.stop_all_workers(stack) self.assertFalse(mock_st.called) mock_cw.assert_called_once_with(stack, mock_tgm, 'engine-001', _worker._rpc_client) self.assertFalse(stack.rollback.called) # test when stack complete stack.FAILED = 'FAILED' stack.status = stack.FAILED _worker.stop_all_workers(stack) self.assertFalse(mock_st.called) mock_cw.assert_called_with(stack, mock_tgm, 'engine-001', _worker._rpc_client) self.assertFalse(stack.rollback.called) @mock.patch.object(stack_objects.Stack, 'select_and_update') def test_update_current_traversal(self, mock_sau): stack = mock.MagicMock() 
stack.current_traversal = 'some-thing' old_trvsl = stack.current_traversal worker._update_current_traversal(stack) self.assertNotEqual(old_trvsl, stack.current_traversal) mock_sau.assert_called_once_with(mock.ANY, stack.id, mock.ANY, exp_trvsl=old_trvsl)
apache-2.0
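The heat worker tests above stack several mock.patch / mock.patch.object decorators per test, and the mock arguments arrive bottom-up (innermost decorator first), which is why test_service_start receives rpc_client_class before rpc_server_method. A self-contained reminder of that ordering, with stand-in names:

from unittest import mock

class Client(object):
    def ping(self):
        return 'real ping'
    def pong(self):
        return 'real pong'

@mock.patch.object(Client, 'pong')   # outermost decorator -> last mock argument
@mock.patch.object(Client, 'ping')   # innermost decorator -> first mock argument
def exercise(mock_ping, mock_pong):
    client = Client()
    client.ping()
    mock_ping.assert_called_once_with()
    assert not mock_pong.called

exercise()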
cvra/can-bootloader
client/tests/test_config_read_tool.py
1
2204
import unittest try: from unittest.mock import * except ImportError: from mock import * from msgpack import * from cvra_bootloader.read_config import main from cvra_bootloader.commands import * import sys import json class ReadConfigToolTestCase(unittest.TestCase): @patch('cvra_bootloader.utils.write_command_retry') @patch('cvra_bootloader.utils.write_command') @patch('cvra_bootloader.utils.open_connection') @patch('builtins.print') def test_integration(self, print_mock, open_conn, write_command, write_command_retry): sys.argv = "test.py -p /dev/ttyUSB0 0 1 2".split() configs = [{'id': i} for i in range(3)] write_command_retry.return_value = { i: packb(configs[i]) for i in range(3) } open_conn.return_value = object() main() write_command_retry.assert_any_call(open_conn.return_value, encode_read_config(), [0, 1, 2]) all_configs = {i: configs[i] for i in range(3)} print_mock.assert_any_call(json.dumps(all_configs, indent=4, sort_keys=True)) @patch('cvra_bootloader.utils.open_connection') @patch('cvra_bootloader.utils.write_command_retry') @patch('cvra_bootloader.utils.write_command') @patch('cvra_bootloader.utils.read_can_datagrams') @patch('builtins.print') def test_network_discovery(self, print_mock, read_can_datagram, write_command, write_command_retry, open_conn): """ Checks if we can perform a whole network discovery. """ sys.argv = "test.py -p /dev/ttyUSB0 --all".split() # The first two board answers the ping board_answers = [(b'', [0], i) for i in range(1, 3)] + [None] read_can_datagram.return_value = iter(board_answers) write_command_retry.return_value = { i: packb({'id': i}) for i in range(1, 3) } main() write_command.assert_any_call(open_conn.return_value, encode_ping(), list(range(1, 128)))
bsd-2-clause
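Both tests above drive the CLI entry point by assigning sys.argv before calling main(). A stand-alone sketch of that pattern with a throwaway argparse parser (the real parser lives in cvra_bootloader.read_config; the flags here only mirror the command line used in the tests):

import argparse
import sys

def fake_main():
    # stand-in for cvra_bootloader.read_config.main's argument handling
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port')
    parser.add_argument('ids', nargs='*', type=int)
    args = parser.parse_args()  # reads sys.argv[1:]
    return args.port, args.ids

sys.argv = "test.py -p /dev/ttyUSB0 0 1 2".split()
assert fake_main() == ('/dev/ttyUSB0', [0, 1, 2])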
pang-w/pyspider
pyspider/webui/app.py
4
3259
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<[email protected]> # http://binux.me # Created on 2014-02-22 23:17:13 import os import sys import logging logger = logging.getLogger("webui") from six import reraise from six.moves import builtins from six.moves.urllib.parse import urljoin from flask import Flask from pyspider.fetcher import tornado_fetcher if os.name == 'nt': import mimetypes mimetypes.add_type("text/css", ".css", True) class TornadoFlask(Flask): """Flask object running with tornado ioloop""" @property def logger(self): return logger def run(self, host=None, port=None, debug=None, **options): from werkzeug.serving import make_server, run_with_reloader if host is None: host = '127.0.0.1' if port is None: server_name = self.config['SERVER_NAME'] if server_name and ':' in server_name: port = int(server_name.rsplit(':', 1)[1]) else: port = 5000 if debug is not None: self.debug = bool(debug) #run_simple(host, port, self, **options) hostname = host port = port application = self use_reloader = self.debug use_debugger = self.debug if use_debugger: from werkzeug.debug import DebuggedApplication application = DebuggedApplication(application, True) def inner(): self.server = make_server(hostname, port, application) self.server.serve_forever() if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': display_hostname = hostname != '*' and hostname or 'localhost' if ':' in display_hostname: display_hostname = '[%s]' % display_hostname self.logger.info('webui running on http://%s:%d/', display_hostname, port) if use_reloader: run_with_reloader(inner) else: inner() def quit(self): if hasattr(self, 'server'): self.server.shutdown_signal = True self.logger.info('webui exiting...') app = TornadoFlask('webui', static_folder=os.path.join(os.path.dirname(__file__), 'static'), template_folder=os.path.join(os.path.dirname(__file__), 'templates')) app.secret_key = os.urandom(24) app.jinja_env.line_statement_prefix = '#' app.jinja_env.globals.update(builtins.__dict__) app.config.update({ 'fetch': lambda x: tornado_fetcher.Fetcher(None, None, async=False).fetch(x)[1], 'taskdb': None, 'projectdb': None, 'scheduler_rpc': None, }) def cdn_url_handler(error, endpoint, kwargs): if endpoint == 'cdn': path = kwargs.pop('path') # cdn = app.config.get('cdn', 'http://cdn.staticfile.org/') # cdn = app.config.get('cdn', '//cdnjs.cloudflare.com/ajax/libs/') cdn = app.config.get('cdn', '//cdnjscn.b0.upaiyun.com/libs/') return urljoin(cdn, path) else: exc_type, exc_value, tb = sys.exc_info() if exc_value is error: reraise(exc_type, exc_value, tb) else: raise error app.handle_url_build_error = cdn_url_handler
apache-2.0
diagramsoftware/odoomrp-utils
purchase_pricelist_partnerinfo/__openerp__.py
12
1572
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { "name": "Product pricelist partnerinfo - Purchase extension", "version": "1.0", "depends": [ "product_pricelist_partnerinfo", "purchase", ], "author": "OdooMRP team, " "AvanzOSC, " "Serv. Tecnol. Avanzados - Pedro M. Baeza", "website": "http://www.odoomrp.com", "contributors": [ "Oihane Crucelaegui <[email protected]>", "Pedro M. Baeza <[email protected]>", "Ana Juaristi <[email protected]>" ], "category": "Hidden/Dependency", "summary": "", "data": [ "views/pricelist_partnerinfo_view.xml" ], "installable": True, "auto_install": True, }
agpl-3.0
wido/cloudstack
tools/ngui/api.py
7
1481
#Licensed to the Apache Software Foundation (ASF) under one #or more contributor license agreements. See the NOTICE file #distributed with this work for additional information #regarding copyright ownership. The ASF licenses this file #to you under the Apache License, Version 2.0 (the #"License"); you may not use this file except in compliance #with the License. You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, #software distributed under the License is distributed on an #"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #KIND, either express or implied. See the License for the #specific language governing permissions and limitations #under the License. from requester import make_request from precache import apicache from config import * import re def get_error_code(error): return int(re.findall("\d{3}",error)[0]) #Find the error code by regular expression # return int(error[11:14]) #Ugly def get_command(verb, subject): commandlist = apicache.get(verb, None) if commandlist is not None: command = commandlist.get(subject, None) if command is not None: return command["name"] return None def apicall(command, data ): response, error = make_request(command, data, None, host, port, apikey, secretkey, protocol, path) if error is not None: return error, get_error_code(error) return response
apache-2.0
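get_error_code above pulls the first three-digit run out of an error string with a regular expression. A quick worked example of that behaviour (the error text is invented for illustration):

import re

def get_error_code(error):
    # same regex-based extraction as in api.py above
    return int(re.findall(r"\d{3}", error)[0])

assert get_error_code("Error Code: 401 Error text: unable to verify credentials") == 401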
Belgabor/django
django/contrib/gis/gdal/prototypes/geom.py
315
4821
import re from datetime import date from ctypes import c_char, c_char_p, c_double, c_int, c_ubyte, c_void_p, POINTER from django.contrib.gis.gdal.envelope import OGREnvelope from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope from django.contrib.gis.gdal.prototypes.generation import \ const_string_output, double_output, geom_output, int_output, \ srs_output, string_output, void_output ### Generation routines specific to this module ### def env_func(f, argtypes): "For getting OGREnvelopes." f.argtypes = argtypes f.restype = None f.errcheck = check_envelope return f def pnt_func(f): "For accessing point information." return double_output(f, [c_void_p, c_int]) def topology_func(f): f.argtypes = [c_void_p, c_void_p] f.restype = c_int f.errchck = check_bool return f ### OGR_G ctypes function prototypes ### # GeoJSON routines, if supported. if GEOJSON: from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p]) to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True) to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True) else: from_json = False to_json = False to_kml = False # GetX, GetY, GetZ all return doubles. getx = pnt_func(lgdal.OGR_G_GetX) gety = pnt_func(lgdal.OGR_G_GetY) getz = pnt_func(lgdal.OGR_G_GetZ) # Geometry creation routines. from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2) from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1) create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int]) clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p]) get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int]) get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p]) geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p]) geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p]) geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p]) geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p]) geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p]) # Geometry modification routines. add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p]) import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)]) # Destroys a geometry destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False) # Geometry export routines. to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB. to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)]) to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True) get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p]) # Geometry spatial-reference related routines. 
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False) get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p]) # Geometry properties get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p]) get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p]) get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p]) get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p]) set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False) get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p]) get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p]) get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p]) get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p]) get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False) geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False) # Topology routines. ogr_contains = topology_func(lgdal.OGR_G_Contains) ogr_crosses = topology_func(lgdal.OGR_G_Crosses) ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint) ogr_equals = topology_func(lgdal.OGR_G_Equals) ogr_intersects = topology_func(lgdal.OGR_G_Intersects) ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps) ogr_touches = topology_func(lgdal.OGR_G_Touches) ogr_within = topology_func(lgdal.OGR_G_Within) # Transformation routines. geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p]) geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p]) # For retrieving the envelope of the geometry. get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
bsd-3-clause
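The generation helpers above (env_func, pnt_func, topology_func and the *_output factories) all follow the same recipe: take a raw function from the loaded shared library and attach argtypes, restype and optionally an errcheck before exposing it. A self-contained sketch of that recipe against libm's pow(), since exercising the OGR entry points themselves requires GDAL; find_library may not resolve on every platform.

import ctypes
import ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library('m'))  # may fail to resolve on some platforms

def double_func(f, argtypes):
    "Attach ctypes metadata, mirroring pnt_func/double_output above."
    f.argtypes = argtypes
    f.restype = ctypes.c_double
    return f

power = double_func(libm.pow, [ctypes.c_double, ctypes.c_double])
assert power(2.0, 10.0) == 1024.0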
apdjustino/DRCOG_Urbansim
src/opus_core/resource_factory.py
1
2293
# Opus/UrbanSim urban simulation software. # Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington # See opus_core/LICENSE from opus_core.resources import Resources from opus_core.storage_factory import StorageFactory class ResourceFactory(object): """ Class for creating a Resource object. """ def get_resources_for_dataset(self, dataset_name, in_storage, out_storage, resources={}, in_table_name_pair=(None,None), out_table_name_pair=(None,None), attributes_pair=(None,None), id_name_pair=(None,None), nchunks_pair=(None,None), debug_pair=(None,None) ): """Create an object of class Resources to be used in a Dataset object. The created resources are merged with the resources given as an argument 'resources'. The first element of each tuple of the remaining arguments contains the desired value, the second element contains the default value which is used if the first element is None. Entries in resources of the same name as the argument values are overwritten if the one of the tuple values is not equal None. """ # merge resources with arguments local_resources = Resources(resources) local_resources.merge_if_not_None({ "in_storage":in_storage, "out_storage":out_storage, "nchunks":nchunks_pair[0], "attributes":attributes_pair[0], "in_table_name": in_table_name_pair[0], "out_table_name": out_table_name_pair[0], "id_name":id_name_pair[0], "debug":debug_pair[0], "dataset_name":dataset_name}) # merge resources with default values local_resources.merge_with_defaults({ "nchunks":nchunks_pair[1], "attributes":attributes_pair[1], "in_table_name":in_table_name_pair[1], "out_table_name":out_table_name_pair[1], "id_name":id_name_pair[1], "debug":debug_pair[1], "dataset_name":dataset_name}) return local_resources
agpl-3.0
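A hypothetical call of get_resources_for_dataset, following the docstring's tuple convention (first element is the explicit value, second is the default used when the first is None). It assumes opus_core is importable; the storages are left as None here just to show how the pairs merge.

from opus_core.resource_factory import ResourceFactory

resources = ResourceFactory().get_resources_for_dataset(
    'household',
    in_storage=None,
    out_storage=None,
    in_table_name_pair=(None, 'households'),   # no explicit value -> default wins
    id_name_pair=(None, 'household_id'),       # default id column
)
# The returned Resources object holds the merged settings:
assert resources['in_table_name'] == 'households'
assert resources['dataset_name'] == 'household'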
andrewpollock/grpc
tools/gcp/stress_test/run_server.py
37
5970
#!/usr/bin/env python2.7 # Copyright 2015-2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import datetime import os import resource import select import subprocess import sys import time from stress_test_utils import BigQueryHelper from stress_test_utils import EventType def run_server(): """This is a wrapper around the interop server and performs the following: 1) Create a 'Summary table' in Big Query to record events like the server started, completed successfully or failed. NOTE: This also creates another table called the QPS table which is currently NOT needed on the server (it is needed on the stress test clients) 2) Start the server process and add a row in Big Query summary table 3) Wait for the server process to terminate. The server process does not terminate unless there is an error. If the server process terminated with a failure, add a row in Big Query and wait forever. NOTE: This script typically runs inside a GKE pod which means that the pod gets destroyed when the script exits. However, in case the server process fails, we would not want the pod to be destroyed (since we might want to connect to the pod for examining logs). This is the reason why the script waits forever in case of failures. """ # Set the 'core file' size to 'unlimited' so that 'core' files are generated # if the server crashes (Note: This is not relevant for Java and Go servers) resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) # Read the parameters from environment variables env = dict(os.environ) run_id = env['RUN_ID'] # The unique run id for this test image_type = env['STRESS_TEST_IMAGE_TYPE'] stress_server_cmd = env['STRESS_TEST_CMD'].split() args_str = env['STRESS_TEST_ARGS_STR'] pod_name = env['POD_NAME'] project_id = env['GCP_PROJECT_ID'] dataset_id = env['DATASET_ID'] summary_table_id = env['SUMMARY_TABLE_ID'] qps_table_id = env['QPS_TABLE_ID'] # The following parameter is to inform us whether the server runs forever # until forcefully stopped or will it naturally stop after sometime. 
# This way, we know that the process should not terminate (even if it does # with a success exit code) and flag any termination as a failure. will_run_forever = env.get('WILL_RUN_FOREVER', '1') logfile_name = env.get('LOGFILE_NAME') print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, ' 'summary_table_id: %s, qps_table_id: %s') % (pod_name, project_id, run_id, dataset_id, summary_table_id, qps_table_id) bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id, dataset_id, summary_table_id, qps_table_id) bq_helper.initialize() # Create BigQuery Dataset and Tables: Summary Table and Metrics Table if not bq_helper.setup_tables(): print 'Error in creating BigQuery tables' return start_time = datetime.datetime.now() logfile = None details = 'Logging to stdout' if logfile_name is not None: print 'Opening log file: ', logfile_name logfile = open(logfile_name, 'w') details = 'Logfile: %s' % logfile_name stress_cmd = stress_server_cmd + [x for x in args_str.split()] details = '%s, Stress server command: %s' % (details, str(stress_cmd)) # Update status that the test is starting (in the status table) bq_helper.insert_summary_row(EventType.STARTING, details) print 'Launching process %s ...' % stress_cmd stress_p = subprocess.Popen(args=stress_cmd, stdout=logfile, stderr=subprocess.STDOUT) # Update the status to running if subprocess.Popen launched the server if stress_p.poll() is None: bq_helper.insert_summary_row(EventType.RUNNING, '') # Wait for the server process to terminate returncode = stress_p.wait() if will_run_forever == '1' or returncode != 0: end_time = datetime.datetime.now().isoformat() event_type = EventType.FAILURE details = 'Returncode: %d; End time: %s' % (returncode, end_time) bq_helper.insert_summary_row(event_type, details) print 'Waiting indefinitely..' select.select([], [], []) return returncode if __name__ == '__main__': run_server()
bsd-3-clause
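run_server() above is configured entirely through environment variables. This sketch lists the set it reads; the variable names are taken from the code, while the values are placeholders for illustration only.

import os

os.environ.update({
    'RUN_ID': '2016_01_01_12_00_00',                           # unique id for this test run
    'STRESS_TEST_IMAGE_TYPE': 'SERVER',
    'STRESS_TEST_CMD': '/var/local/git/grpc/interop_server',   # placeholder path
    'STRESS_TEST_ARGS_STR': '--port=8080',
    'POD_NAME': 'stress-server-0',
    'GCP_PROJECT_ID': 'my-gcp-project',
    'DATASET_ID': 'stress_test_dataset',
    'SUMMARY_TABLE_ID': 'summary',
    'QPS_TABLE_ID': 'qps',
    # optional:
    'WILL_RUN_FOREVER': '1',
    'LOGFILE_NAME': '/tmp/stress_server.log',
})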
lukas-krecan/tensorflow
tensorflow/python/ops/linalg_grad.py
17
2430
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in linalg_ops.py. Useful reference for derivative formulas is An extended collection of matrix derivative results for forward and reverse mode algorithmic differentiation by Mike Giles: http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import constant_op from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops @ops.RegisterGradient("MatrixInverse") def _MatrixInverseGrad(op, grad): """Gradient for MatrixInverse.""" ainv = op.outputs[0] return -math_ops.matmul(ainv, math_ops.matmul(grad, ainv, transpose_b=True), transpose_a=True) @ops.RegisterGradient("BatchMatrixInverse") def _BatchMatrixInverseGrad(op, grad): """Gradient for BatchMatrixInverse.""" ainv = op.outputs[0] return -math_ops.batch_matmul(ainv, math_ops.batch_matmul(grad, ainv, adj_y=True), adj_x=True) @ops.RegisterGradient("MatrixDeterminant") def _MatrixDeterminantGrad(op, grad): """Gradient for MatrixDeterminant. Returns: gradient Args: op: op grad: grad """ a = op.inputs[0] c = op.outputs[0] ainv = linalg_ops.matrix_inverse(a) return grad * c * array_ops.transpose(ainv)
apache-2.0
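The determinant gradient registered above relies on Jacobi's formula, d det(A)/dA = det(A) * inv(A)^T. A quick NumPy finite-difference check of that identity (a verification sketch, not TensorFlow code):

import numpy as np

rng = np.random.RandomState(0)
a = rng.rand(3, 3) + 3.0 * np.eye(3)   # well-conditioned test matrix
eps = 1e-6

numeric = np.zeros_like(a)
for i in range(3):
    for j in range(3):
        da = np.zeros_like(a)
        da[i, j] = eps
        # central difference of det with respect to entry (i, j)
        numeric[i, j] = (np.linalg.det(a + da) - np.linalg.det(a - da)) / (2 * eps)

analytic = np.linalg.det(a) * np.linalg.inv(a).T
assert np.allclose(numeric, analytic, atol=1e-5)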
pombredanne/MOG
tools/install_venv_common.py
15
7428
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def post_process(self): self.get_distro().post_process() def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) def post_process(self): """Any distribution-specific post-processing gets done here. In particular, this is useful for applying patches to code inside the venv. """ pass class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def apply_patch(self, originalfile, patchfile): self.run_command(['patch', '-N', originalfile, patchfile], check_exit_code=False) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() def post_process(self): """Workaround for a bug in eventlet. This currently affects RHEL6.1, but the fix can safely be applied to all RHEL and Fedora distributions. This can be removed when the fix is applied upstream. Nova: https://bugs.launchpad.net/nova/+bug/884915 Upstream: https://bitbucket.org/eventlet/eventlet/issue/89 RHEL: https://bugzilla.redhat.com/958868 """ if os.path.exists('contrib/redhat-eventlet.patch'): # Install "patch" program if it's not there if not self.check_pkg('patch'): self.die("Please install 'patch'.") # Apply the eventlet patch self.apply_patch(os.path.join(self.venv, 'lib', self.py_version, 'site-packages', 'eventlet/green/subprocess.py'), 'contrib/redhat-eventlet.patch')
apache-2.0
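A sketch of the small driver script that usually sits next to this module in OpenStack projects (commonly tools/install_venv.py); the paths, file names and project name here are placeholders chosen to match the class signature above.

import os
import sys

from install_venv_common import InstallVenv  # module defined above

def main(argv):
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    venv = os.path.join(root, '.venv')
    install = InstallVenv(root, venv, 'requirements.txt',
                          'test-requirements.txt', 'python2.7', 'MOG')
    options = install.parse_args(argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    install.post_process()

if __name__ == '__main__':
    main(sys.argv)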
lbjay/cds-invenio
modules/bibformat/lib/elements/bfe_photos.py
4
5570
# -*- coding: utf-8 -*- ## ## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - Print photos of the record (if bibdoc file) """ import cgi from invenio.bibdocfile import BibRecDocs from invenio.urlutils import create_html_link def format(bfo, separator=" ", style='', img_style='', text_style='font-size:small', print_links='yes', max_photos='', show_comment='yes', img_max_width='250px', display_all_version_links='yes'): """ Lists the photos of a record. Display the icon version, linked to its original version. This element works for photos appended to a record as BibDoc files, for which a preview icon has been generated. If there are several formats for one photo, use the first one found. @param separator: separator between each photo @param print_links: if 'yes', print links to the original photo @param style: style attributes of the whole image block. Eg: "padding:2px;border:1px" @param img_style: style attributes of the images. Eg: "width:50px;border:none" @param text_style: style attributes of the text. 
Eg: "font-size:small" @param max_photos: the maximum number of photos to display @param show_comment: if 'yes', display the comment of each photo @param display_all_version_links: if 'yes', print links to additional (sub)formats """ photos = [] bibarchive = BibRecDocs(bfo.recID) bibdocs = bibarchive.list_bibdocs() if max_photos.isdigit(): max_photos = int(max_photos) else: max_photos = len(bibdocs) for doc in bibdocs[:max_photos]: found_icons = [] found_url = '' for docfile in doc.list_latest_files(): if docfile.is_icon(): found_icons.append((docfile.get_size(), docfile.get_url())) else: found_url = docfile.get_url() found_icons.sort() if found_icons: additional_links = '' name = doc.get_docname() comment = doc.list_latest_files()[0].get_comment() preview_url = None if len(found_icons) > 1: preview_url = found_icons[1][1] additional_urls = [(docfile.get_size(), docfile.get_url(), \ docfile.get_superformat(), docfile.get_subformat()) \ for docfile in doc.list_latest_files() if not docfile.is_icon()] additional_urls.sort() additional_links = [create_html_link(url, urlargd={}, \ linkattrd={'style': 'font-size:x-small'}, \ link_label="%s %s (%s)" % (format.strip('.').upper(), subformat, format_size(size))) \ for (size, url, format, subformat) in additional_urls] img = '<img src="%(icon_url)s" alt="%(name)s" style="max-width:%(img_max_width)s;_width:%(img_max_width)s;%(img_style)s" />' % \ {'icon_url': cgi.escape(found_icons[0][1], True), 'name': cgi.escape(name, True), 'img_style': img_style, 'img_max_width': img_max_width} if print_links.lower() == 'yes': img = '<a href="%s">%s</a>' % (cgi.escape(preview_url or found_url, True), img) if display_all_version_links.lower() == 'yes' and additional_links: img += '<br />' + '&nbsp;'.join(additional_links) + '<br />' if show_comment.lower() == 'yes' and comment: img += '<div style="margin-auto;text-align:center;%(text_style)s">%(comment)s</div>' % \ {'comment': comment.replace('\n', '<br/>'), 'text_style': text_style} img = '<div style="vertical-align: middle;text-align:center;display:inline-block;display: -moz-inline-stack;zoom: 1;*display: inline;max-width:%(img_max_width)s;_width:%(img_max_width)s;text-align:center;%(style)s">%(img)s</div>' % \ {'img_max_width': img_max_width, 'style': style, 'img': img} photos.append(img) return '<div>' + separator.join(photos) + '</div>' def escape_values(bfo): """ Called by BibFormat in order to check if output of this element should be escaped. """ return 0 def format_size(size): """ Get human-readable string for the given size in Bytes """ if size < 1024: return "%d byte%s" % (size, size != 1 and 's' or '') elif size < 1024 * 1024: return "%.1f KB" % (size / 1024) elif size < 1024 * 1024 * 1024: return "%.1f MB" % (size / (1024 * 1024)) else: return "%.1f GB" % (size / (1024 * 1024 * 1024))
gpl-2.0
purepitch/trove
tests/test_view.py
1
2089
# -*- coding: utf-8 -*- import unittest import StringIO import sys import re from modules.view import View class TestView(unittest.TestCase): def setUp(self): self.view = View() def testPrintInfoExpectsArgument(self): with self.assertRaises(TypeError): self.view.print_line() def testPrintInfoPrintsInput(self): message = "test message" old_stdout = sys.stdout sys.stdout = StringIO.StringIO() self.view.print_line(message) received_stdout = sys.stdout.getvalue().strip() sys.stdout = old_stdout self.assertEqual(received_stdout, message) def testPrintBoldExpectsArgument(self): with self.assertRaises(TypeError): self.view.print_bold() def testPrintBoldPrintsMessageInAnsiBold(self): message = "bold message" old_stdout = sys.stdout sys.stdout = StringIO.StringIO() self.view.print_bold(message) received_stdout = sys.stdout.getvalue().strip() sys.stdout = old_stdout bold_message = "\x1b[01m" + message + "\x1b[0m" self.assertEqual(received_stdout, bold_message) def testPrintErrorExpectsArgument(self): with self.assertRaises(TypeError): self.view.print_error() def testPrintErrorPrintsMessageInAnsiBold(self): message = "error message" old_stdout = sys.stdout sys.stdout = StringIO.StringIO() self.view.print_error(message) received_stdout = sys.stdout.getvalue() sys.stdout = old_stdout formatted_message = "\x1b[01m" + message + "\x1b[0m\n" self.assertEqual(received_stdout, formatted_message) def testPrintHelpPrintsAvailableCommandsToStdout(self): old_stdout = sys.stdout sys.stdout = StringIO.StringIO() self.view.print_help() received_stdout = sys.stdout.getvalue() sys.stdout = old_stdout regexp = re.compile('Available commands:') self.assertRegexpMatches(received_stdout, regexp) # vim: expandtab shiftwidth=4 softtabstop=4
gpl-3.0
databrary/curation
tools/scripts/utils/openproject/update.py
1
5971
#!/usr/bin/env python2.7 import sys, os '''smoother more informative version check''' if sys.version_info >= (3, 0, 0): sys.exit("You need to run this with python 2.7, exiting now so you can get your stuff together") '''Run from ../scripts/ with `python -m utils.openproject.update`''' import json from .. import dbclient from config import conn as c import requests import argparse from pprint import pprint '''quick and dirty command line tool that automatically updates new volumes to openproject via the openproject api''' _QUERIES = { "db_volumes":"select v.id, volume_creation(v.id), v.name, owners from volume v left join volume_owners o ON v.id = o.volume where v.id > 3 order by v.id;", "op_parties":"select wp.id, wp.subject, cv.* from work_packages wp left join custom_values cv on cv.customized_id = wp.id where cv.customized_type = 'WorkPackage' and wp.type_id = 6 and wp.project_id = 12 and cv.custom_field_id = 29 order by wp.id asc;", "op_workpackages": "select wp.id, wp.type_id, wp.project_id, wp.parent_id, wp.category_id, wp.created_at, wp.start_date, cv.* from work_packages wp left join custom_values cv on cv.customized_id = wp.id where cv.customized_type = 'WorkPackage' and project_id = 14 and cv.custom_field_id = 29 order by wp.id asc;" } def wp_vols(data): return sorted([d[11] for d in data if d[11] != None and d[11] != ''], key=lambda x: float(x)) def getnew(op_vols, db_vols): return [z for z in db_vols if str(z[0]) not in op_vols] def getdel(op_vols, db_vols): vols_only = [] for d in db_vols: vols_only.append(d[0]) return [z for z in op_vols if int(z) not in vols_only] def getData(vol_data, party_data): nl = [] for v in vol_data: d = {} owner_id = v[3][0].split(':')[0] if v[3] is not None else None if owner_id is not None: parent_id = [p[0] for p in party_data if p[6]==owner_id] else: parent_id = None d["owner_id"] = owner_id d["parent_id"] = parent_id d["volume_id"] = v[0] d["start_date"] = v[1] d["title"] = v[2] nl.append(d) return nl def prepareData(data): fresh_data = [] for i in data: record = { "customField37": True, "description": { "format": "textile", "raw": "" }, "_links": { "type": {"href":"project/api/v3/types/16"}, "status":{"href":"project/api/v3/statuses/1"}, "priority":{"href":"project/api/v3/priorities/3"} } } if type(i['parent_id']) == list: if i['parent_id'] != []: pid = int(i['parent_id'][0]) else: pid = None else: pid = None desc = "Opened: %s" % (i['start_date'].strftime('%Y-%m-%d')) record['subject'] = i['title'] record['description']['raw'] = desc record['parentId'] = pid record['customField29'] = int(i['volume_id']) fresh_data.append(record) return fresh_data def prepareDel(del_vols, workpackages): return [w[0] for w in workpackages if w[11] in del_vols] def insert_vols(data): data = json.dumps(data) return requests.post(c.API_POST_TARGET, auth=("apikey", c.API_KEY), data=data, headers={"Content-Type": "application/json"}) if __name__ == '__main__': ######################### /command line argument handling ################################ parser = argparse.ArgumentParser(description='quick and dirty command line tool that automatically updates new volumes to openproject via the openproject api') parser.add_argument('-r', '--runnow', help='Arg to make update run now', required=False, action='store_true') args = vars(parser.parse_args()) RUNNOW = args['runnow'] ######################### /command line argument handling ################################ db_DB = dbclient.DB(c.db['HOST'], c.db['DATABASE'], c.db['USER'], c.db['PASSWORD'], c.db['PORT']) op_DB = 
dbclient.DB(c.op['HOST'], c.op['DATABASE'], c.op['USER'], c.op['PASSWORD'], c.op['PORT']) # # 1 go to dbrary and get all volumes (id, owner id) # db_volumes = db_DB.query(_QUERIES['db_volumes']) # # 2 get all wp from op for the volumes project with volume id # op_workpackages = op_DB.query(_QUERIES['op_workpackages']) # - index the volumes in already volumes_in_op = wp_vols(op_workpackages) # # 3a compare data and determine all of the volumes in dbrary that need to be added # vols_to_add = getnew(volumes_in_op, db_volumes) print("%s new volumes to be added" % str(len(vols_to_add))) # # 3b determine which volumes have been added to op, but no longer exist in db # vols_to_del = getdel(volumes_in_op, db_volumes) print("%s volumes have been deleted" % str(len(vols_to_del))) # # 4a prepare data for adding to wp # - get user information same way as get volume info from wp as #2 op_parties = op_DB.query(_QUERIES['op_parties']) raw_data = getData(vols_to_add, op_parties) ready_data = prepareData(raw_data) print("To be added:") pprint(ready_data) # # # 4b prepare which volumes we need to edit, got back to op_workpackages and get wp ids by vol in vols_to_del # del_data = prepareDel(vols_to_del, op_workpackages) print("the following workpackages should be deleted: %s" % str(del_data)) # # 5 insert these records via the api (POST - /project/api/v3/projects/volumes/work_packages) # if RUNNOW: print("sending volumes to tickets...") for r in ready_data: insert_vols(r) else: print("Run with `-r` to insert outstanding volumes into ticket flow") # # 6 remove the deleted volumes # if op api actually allowed this to happen # # 7 close up data base, die # del db_DB del op_DB
gpl-3.0
kustodian/ansible
lib/ansible/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py
37
6115
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, Evgeniy Krysanov <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}

DOCUMENTATION = r'''
---
module: bitbucket_pipeline_key_pair
short_description: Manages Bitbucket pipeline SSH key pair
description:
  - Manages Bitbucket pipeline SSH key pair.
version_added: "2.8"
author:
  - Evgeniy Krysanov (@catcombo)
options:
  client_id:
    description:
      - OAuth consumer key.
      - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
    type: str
  client_secret:
    description:
      - OAuth consumer secret.
      - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
    type: str
  repository:
    description:
      - The repository name.
    type: str
    required: true
  username:
    description:
      - The repository owner.
    type: str
    required: true
  public_key:
    description:
      - The public key.
    type: str
  private_key:
    description:
      - The private key.
    type: str
  state:
    description:
      - Indicates desired state of the key pair.
    type: str
    required: true
    choices: [ absent, present ]
notes:
  - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
  - Check mode is supported.
'''

EXAMPLES = r'''
- name: Create or update SSH key pair
  bitbucket_pipeline_key_pair:
    repository: 'bitbucket-repo'
    username: bitbucket_username
    public_key: '{{lookup("file", "bitbucket.pub") }}'
    private_key: '{{lookup("file", "bitbucket") }}'
    state: present

- name: Remove SSH key pair
  bitbucket_pipeline_key_pair:
    repository: bitbucket-repo
    username: bitbucket_username
    state: absent
'''

RETURN = r''' # '''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.source_control.bitbucket import BitbucketHelper

error_messages = {
    'invalid_params': 'Account, repository or SSH key pair was not found',
    'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
}

BITBUCKET_API_ENDPOINTS = {
    'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
}


def get_existing_ssh_key_pair(module, bitbucket):
    """
    Retrieves an existing ssh key pair from repository
    specified in module param `repository`

    :param module: instance of the :class:`AnsibleModule`
    :param bitbucket: instance of the :class:`BitbucketHelper`
    :return: existing key pair or None if not found
    :rtype: dict or None

    Return example::

        {
            "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
            "type": "pipeline_ssh_key_pair"
        }
    """
    api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
        username=module.params['username'],
        repo_slug=module.params['repository'],
    )

    info, content = bitbucket.request(
        api_url=api_url,
        method='GET',
    )

    if info['status'] == 404:
        # Account, repository or SSH key pair was not found.
        return None

    return content


def update_ssh_key_pair(module, bitbucket):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
            username=module.params['username'],
            repo_slug=module.params['repository'],
        ),
        method='PUT',
        data={
            'private_key': module.params['private_key'],
            'public_key': module.params['public_key'],
        },
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_params'])

    if info['status'] != 200:
        module.fail_json(msg='Failed to create or update pipeline ssh key pair: {0}'.format(info))


def delete_ssh_key_pair(module, bitbucket):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
            username=module.params['username'],
            repo_slug=module.params['repository'],
        ),
        method='DELETE',
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_params'])

    if info['status'] != 204:
        module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))


def main():
    argument_spec = BitbucketHelper.bitbucket_argument_spec()
    argument_spec.update(
        repository=dict(type='str', required=True),
        username=dict(type='str', required=True),
        public_key=dict(type='str'),
        private_key=dict(type='str', no_log=True),
        state=dict(type='str', choices=['present', 'absent'], required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    bitbucket = BitbucketHelper(module)

    state = module.params['state']
    public_key = module.params['public_key']
    private_key = module.params['private_key']

    # Check parameters
    if ((public_key is None) or (private_key is None)) and (state == 'present'):
        module.fail_json(msg=error_messages['required_keys'])

    # Retrieve access token for authorized API requests
    bitbucket.fetch_access_token()

    # Retrieve existing ssh key
    key_pair = get_existing_ssh_key_pair(module, bitbucket)

    changed = False

    # Create or update key pair
    if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
        if not module.check_mode:
            update_ssh_key_pair(module, bitbucket)
        changed = True

    # Delete key pair
    elif key_pair and (state == 'absent'):
        if not module.check_mode:
            delete_ssh_key_pair(module, bitbucket)
        changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
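# --- hedged sketch, not part of the module ---
# A minimal standalone check of the same endpoint with plain `requests`, e.g. to verify
# what the module configured. It assumes the default public API base
# https://api.bitbucket.org and that a valid OAuth access token (with the pipeline scope)
# has already been obtained; the URL pattern and 404 handling mirror the module above.
import requests


def show_pipeline_key_pair(username, repo_slug, access_token):
    url = ('https://api.bitbucket.org/2.0/repositories/'
           '{username}/{repo_slug}/pipelines_config/ssh/key_pair').format(
        username=username, repo_slug=repo_slug)
    response = requests.get(url, headers={'Authorization': 'Bearer {0}'.format(access_token)})
    # 404: account, repository or key pair was not found (same meaning as in the module)
    if response.status_code == 404:
        return None
    return response.json()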
gpl-3.0