from __future__ import division, unicode_literals

from itertools import permutations, product
from random import Random

import numpy as np
import pytest

try:
    from unittest import mock
except ImportError:
    import mock

from catpy import CoordinateTransformer
from catpy.spatial import StackOrientation

COUNT = 20
SEED = 1
DIMS = "xyz"
DIRECTIONS = "stack_to_project", "project_to_stack"
EXAMPLE_COORD = 10
ZOOM_LEVELS = (-2, 0, 1)


@pytest.fixture
def default_res():
    return {"x": 1.0, "y": 2.0, "z": 3.0}


@pytest.fixture
def default_trans():
    return {"x": 10.0, "y": 20.0, "z": 30.0}


default_orientation = "XY"


@pytest.fixture
def coordinate_generator():
    """Return a generator function which yields `count` randomly-generated
    coordinate dicts in the range [-500, 500), as a mixture of floats and ints"""

    def wrapped(count=COUNT, seed=SEED):
        twister = Random(seed)

        def rand(is_int=False):
            n = (twister.random() - 0.5) * 1000
            return n if not is_int else int(n)

        for _ in range(count):
            is_int = twister.random() > 0.5
            yield {dim: rand(is_int) for dim in DIMS}

    return wrapped


@pytest.fixture
def catmaid_mock(default_res, default_trans):
    catmaid = mock.Mock()
    stack_info = {
        "resolution": default_res,
        "translation": default_trans,
        "orientation": default_orientation,
    }
    catmaid.configure_mock(**{"get.return_value": stack_info})
    return catmaid


@pytest.fixture
def default_coord_transformer(default_res, default_trans):
    return CoordinateTransformer(default_res, default_trans)


@pytest.mark.parametrize("has_res", [True, False])
@pytest.mark.parametrize("has_trans", [True, False])
def test_instantiate(default_res, default_trans, has_res, has_trans):
    if not has_res:
        default_res = None
    if not has_trans:
        default_trans = None
    assert CoordinateTransformer(default_res, default_trans)


def test_from_catmaid(default_coord_transformer, catmaid_mock):
    assert (
        CoordinateTransformer.from_catmaid(catmaid_mock, None)
        == default_coord_transformer
    )


@pytest.mark.parametrize("dim", DIMS)
def test_project_to_stack_coord(
    dim, default_coord_transformer, default_res, default_trans
):
    project_coord = EXAMPLE_COORD
    expected_response = (project_coord - default_trans[dim]) / default_res[dim]
    response = default_coord_transformer.project_to_stack_coord(dim, project_coord)
    assert response[1] == expected_response


@pytest.mark.parametrize("dim", DIMS)
def test_stack_to_project_coord(
    dim, default_coord_transformer, default_res, default_trans
):
    stack_coord = EXAMPLE_COORD
    expected_response = (stack_coord * default_res[dim]) + default_trans[dim]
    response = default_coord_transformer.stack_to_project_coord(dim, stack_coord)
    assert response[1] == expected_response


@pytest.mark.parametrize("direction", DIRECTIONS)
def test_stack_to_project_and_project_to_stack(
    coordinate_generator, default_coord_transformer, direction
):
    for coords in coordinate_generator():
        expected_response = dict(
            getattr(default_coord_transformer, direction + "_coord")(dim, coord)
            for dim, coord in coords.items()
        )
        actual_response = getattr(default_coord_transformer, direction)(coords)
        assert expected_response == actual_response


def get_expected_array_response(
    coordinate_transformer, direction, coords_list, dims=DIMS
):
    """
    Parameters
    ----------
    coordinate_transformer : CoordinateTransformer
    direction : str
        'project_to_stack' or 'stack_to_project'
    coords_list : list of dict
        dicts are of form {'x': number, 'y': number, 'z': number}
    dims : iterable
        dimension order, default 'xyz'

    Returns
    -------
    np.array
    """
    output = []
    for coords in coords_list:
        transformed = getattr(coordinate_transformer, direction)(coords)
        output.append([transformed[dim] for dim in dims])
    return np.array(output)


@pytest.mark.parametrize("dims", permutations("xyz"))
@pytest.mark.parametrize("direction", DIRECTIONS)
def test_arrays(coordinate_generator, default_coord_transformer, direction, dims):
    coords_list = list(coordinate_generator())
    coords_array = np.array([[coords[dim] for dim in dims] for coords in coords_list])
    expected_response = get_expected_array_response(
        default_coord_transformer, direction, coords_list, dims
    )
    actual_response = getattr(default_coord_transformer, direction + "_array")(
        coords_array, dims=dims
    )
    assert np.allclose(actual_response, expected_response)


@pytest.mark.parametrize("dim", "xyz")
def test_stack_to_scaled_coord(default_coord_transformer, dim):
    coord = EXAMPLE_COORD
    default_coord_transformer.scale_z = True
    for src_zoom, tgt_zoom in product(ZOOM_LEVELS, repeat=2):
        response = default_coord_transformer.stack_to_scaled_coord(
            dim, coord, tgt_zoom, src_zoom
        )
        if tgt_zoom > src_zoom:
            assert response < coord
        elif tgt_zoom < src_zoom:
            assert response > coord
        else:
            assert response == coord
        assert response == coord / np.exp2(tgt_zoom - src_zoom)


def test_stack_to_scaled_coord_z(default_coord_transformer):
    """Test that no scaling is done in Z by default"""
    coord = EXAMPLE_COORD
    for src_zoom, tgt_zoom in product(ZOOM_LEVELS, repeat=2):
        response = default_coord_transformer.stack_to_scaled_coord(
            "z", coord, tgt_zoom, src_zoom
        )
        assert response == coord


@pytest.mark.parametrize("scale_z", (True, False))
def test_stack_to_scaled(coordinate_generator, default_coord_transformer, scale_z):
    default_coord_transformer.scale_z = scale_z
    for coords in coordinate_generator():
        for src_zoom, tgt_zoom in product(ZOOM_LEVELS, repeat=2):
            expected_response = {
                dim: default_coord_transformer.stack_to_scaled_coord(
                    dim, coord, tgt_zoom, src_zoom
                )
                for dim, coord in coords.items()
            }
            actual_response = default_coord_transformer.stack_to_scaled(
                coords, tgt_zoom, src_zoom
            )
            assert expected_response == actual_response


@pytest.mark.parametrize("scale_z", (True, False))
@pytest.mark.parametrize("dims", permutations("xyz"))
def test_stack_to_scaled_array(
    coordinate_generator, default_coord_transformer, scale_z, dims
):
    coords_list = list(coordinate_generator())
    coords_array = np.array([[coords[dim] for dim in dims] for coords in coords_list])
    default_coord_transformer.scale_z = scale_z
    for src_zoom, tgt_zoom in product(ZOOM_LEVELS, repeat=2):
        output = []
        for coords in coords_list:
            transformed = default_coord_transformer.stack_to_scaled(
                coords, tgt_zoom, src_zoom
            )
            output.append([transformed[dim] for dim in dims])
        expected_response = np.array(output)
        actual_response = default_coord_transformer.stack_to_scaled_array(
            coords_array, tgt_zoom, src_zoom, dims
        )
        assert np.allclose(expected_response, actual_response)


@pytest.mark.parametrize("orientation", ["XY", "xy", 0, StackOrientation.XY, None])
def test_can_validate_orientation_valid(orientation):
    trans = CoordinateTransformer(orientation=orientation)
    assert trans.orientation == StackOrientation.XY
    assert trans.depth_dim == "z"


@pytest.mark.parametrize(
    "orientation,expected_exception",
    [[3, ValueError], ["xyz", KeyError], ["xc", KeyError]],
)
def test_can_validate_orientation_invalid(orientation, expected_exception):
    with pytest.raises(expected_exception):
        CoordinateTransformer(orientation=orientation)


@pytest.mark.parametrize(
    "orientation,direction,expected",
    [
        ["XY", "project_to_stack", {"z": 0, "y": 1, "x": 2}],
        ["XY", "stack_to_project", {"z": 0, "y": 1, "x": 2}],
        ["XZ", "project_to_stack", {"z": 1, "y": 0, "x": 2}],
        ["XZ", "stack_to_project", {"z": 1, "y": 0, "x": 2}],
        ["ZY", "project_to_stack", {"z": 2, "y": 1, "x": 0}],
        ["ZY", "stack_to_project", {"z": 2, "y": 1, "x": 0}],
    ],
)
def test_project_to_stack_orientation(orientation, direction, expected):
    coord_trans = CoordinateTransformer(orientation=orientation)
    result = getattr(coord_trans, direction)({"z": 0, "y": 1, "x": 2})
    assert result == expected
from __future__ import unicode_literals import difflib import errno import json import os import posixpath import re import socket import sys import threading import unittest import warnings from collections import Counter from copy import copy from functools import wraps from unittest.util import safe_repr from django.apps import apps from django.conf import settings from django.core import mail from django.core.exceptions import ImproperlyConfigured, ValidationError from django.core.handlers.wsgi import WSGIHandler, get_path_info from django.core.management import call_command from django.core.management.color import no_style from django.core.management.sql import emit_post_migrate_signal from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer from django.core.urlresolvers import clear_url_caches, set_urlconf from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction from django.forms.fields import CharField from django.http import QueryDict from django.test.client import Client from django.test.html import HTMLParseError, parse_html from django.test.signals import setting_changed, template_rendered from django.test.utils import ( CaptureQueriesContext, ContextList, compare_xml, modify_settings, override_settings, ) from django.utils import six from django.utils.deprecation import ( RemovedInDjango20Warning, RemovedInDjango21Warning, ) from django.utils.encoding import force_text from django.utils.six.moves.urllib.parse import ( unquote, urlparse, urlsplit, urlunsplit, ) from django.utils.six.moves.urllib.request import url2pathname from django.views.static import serve __all__ = ('TestCase', 'TransactionTestCase', 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') def to_list(value): """ Puts value into a list if it's not already one. Returns an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = '%s\n%s' % (msg, e.msg) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class _AssertNumQueriesContext(CaptureQueriesContext): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num super(_AssertNumQueriesContext, self).__init__(connection) def __exit__(self, exc_type, exc_value, traceback): super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback) if exc_type is not None: return executed = len(self) self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected\nCaptured queries were:\n%s" % ( executed, self.num, '\n'.join( query['sql'] for query in self.captured_queries ) ) ) class _AssertTemplateUsedContext(object): def __init__(self, test_case, template_name): self.test_case = test_case self.template_name = template_name self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): return self.template_name in self.rendered_template_names def message(self): return '%s was not rendered.' 
% self.template_name def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return if not self.test(): message = self.message() if len(self.rendered_templates) == 0: message += ' No template was rendered.' else: message += ' Following templates were rendered: %s' % ( ', '.join(self.rendered_template_names)) self.test_case.fail(message) class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): return self.template_name not in self.rendered_template_names def message(self): return '%s was rendered.' % self.template_name class SimpleTestCase(unittest.TestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client _overridden_settings = None _modified_settings = None @classmethod def setUpClass(cls): super(SimpleTestCase, cls).setUpClass() if cls._overridden_settings: cls._cls_overridden_context = override_settings(**cls._overridden_settings) cls._cls_overridden_context.enable() if cls._modified_settings: cls._cls_modified_context = modify_settings(cls._modified_settings) cls._cls_modified_context.enable() @classmethod def tearDownClass(cls): if hasattr(cls, '_cls_modified_context'): cls._cls_modified_context.disable() delattr(cls, '_cls_modified_context') if hasattr(cls, '_cls_overridden_context'): cls._cls_overridden_context.disable() delattr(cls, '_cls_overridden_context') super(SimpleTestCase, cls).tearDownClass() def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ testMethod = getattr(self, self._testMethodName) skipped = (getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False)) if not skipped: try: self._pre_setup() except Exception: result.addError(self, sys.exc_info()) return super(SimpleTestCase, self).__call__(result) if not skipped: try: self._post_teardown() except Exception: result.addError(self, sys.exc_info()) return def _pre_setup(self): """Performs any pre-test setup. This includes: * Creating a test client. * If the class has a 'urls' attribute, replace ROOT_URLCONF with it. * Clearing the mail test outbox. """ self.client = self.client_class() self._urlconf_setup() mail.outbox = [] def _urlconf_setup(self): if hasattr(self, 'urls'): warnings.warn( "SimpleTestCase.urls is deprecated and will be removed in " "Django 2.0. Use @override_settings(ROOT_URLCONF=...) " "in %s instead." % self.__class__.__name__, RemovedInDjango20Warning, stacklevel=2) set_urlconf(None) self._old_root_urlconf = settings.ROOT_URLCONF settings.ROOT_URLCONF = self.urls clear_url_caches() def _post_teardown(self): """Performs any post-test things. This includes: * Putting back the original ROOT_URLCONF if it was changed. """ self._urlconf_teardown() def _urlconf_teardown(self): if hasattr(self, '_old_root_urlconf'): set_urlconf(None) settings.ROOT_URLCONF = self._old_root_urlconf clear_url_caches() def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts to the original value when exiting the context. """ return override_settings(**kwargs) def modify_settings(self, **kwargs): """ A context manager that temporarily applies changes a list setting and reverts back to the original value when exiting the context. 
""" return modify_settings(**kwargs) def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True): """Asserts that a response redirected to a specific URL, and that the redirect URL can be loaded. Note that assertRedirects won't work for external links since it uses TestClient to do a request (use fetch_redirect_response=False to check such links without fetching them). """ if host is not None: warnings.warn( "The host argument is deprecated and no longer used by assertRedirects", RemovedInDjango21Warning, stacklevel=2 ) if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue(len(response.redirect_chain) > 0, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) self.assertEqual(response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected:" " Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code)) url, status_code = response.redirect_chain[-1] scheme, netloc, path, query, fragment = urlsplit(url) self.assertEqual(response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final" " Response code was %d (expected %d)" % (response.status_code, target_status_code)) else: # Not a followed redirect self.assertEqual(response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) url = response.url scheme, netloc, path, query, fragment = urlsplit(url) if fetch_redirect_response: redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https')) # Get the redirection page, using the same client that was used # to obtain the original response. self.assertEqual(redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s':" " response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code)) if url != expected_url: # For temporary backwards compatibility, try to compare with a relative url e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url) relative_url = urlunsplit(('', '', e_path, e_query, e_fragment)) if url == relative_url: warnings.warn( "assertRedirects had to strip the scheme and domain from the " "expected URL, as it was always added automatically to URLs " "before Django 1.9. Please update your expected URLs by " "removing the scheme and domain.", RemovedInDjango21Warning, stacklevel=2) expected_url = relative_url self.assertEqual(url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)) def _assert_contains(self, response, text, status_code, msg_prefix, html): # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. 
if (hasattr(response, 'render') and callable(response.render) and not response.is_rendered): response.render() if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) if response.streaming: content = b''.join(response.streaming_content) else: content = response.content if not isinstance(text, bytes) or html: text = force_text(text, encoding=response.charset) content = content.decode(response.charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:") text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:") real_count = content.count(text) return (text_repr, real_count, msg_prefix) def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` occurs ``count`` times in the content of the response. If ``count`` is None, the count doesn't matter - the assertion is true if the text occurs at least once in the response. """ text_repr, real_count, msg_prefix = self._assert_contains( response, text, status_code, msg_prefix, html) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of %s in response" " (expected %d)" % (real_count, text_repr, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr) def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` doesn't occurs in the content of the response. """ text_repr, real_count, msg_prefix = self._assert_contains( response, text, status_code, msg_prefix, html) self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr) def assertFormError(self, response, form, field, errors, msg_prefix=''): """ Asserts that a form used to render the response has a specific field error. """ if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + "Response did not use any contexts to " "render the response") # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. 
found_form = False for i, context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue(err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors))) elif field in context[form].fields: self.fail(msg_prefix + "The field '%s' on form '%s'" " in context %d contains no errors" % (field, form, i)) else: self.fail(msg_prefix + "The form '%s' in context %d" " does not contain the field '%s'" % (form, i, field)) else: non_field_errors = context[form].non_field_errors() self.assertTrue(err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors)) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the" " response" % form) def assertFormsetError(self, response, formset, form_index, field, errors, msg_prefix=''): """ Asserts that a formset used to render the response has a specific error. For field errors, specify the ``form_index`` and the ``field``. For non-field errors, specify the ``form_index`` and the ``field`` as None. For non-form errors, specify ``form_index`` as None and the ``field`` as None. """ # Add punctuation to msg_prefix if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + 'Response did not use any contexts to ' 'render the response') # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. found_formset = False for i, context in enumerate(contexts): if formset not in context: continue found_formset = True for err in errors: if field is not None: if field in context[formset].forms[form_index].errors: field_errors = context[formset].forms[form_index].errors[field] self.assertTrue(err in field_errors, msg_prefix + "The field '%s' on formset '%s', " "form %d in context %d does not contain the " "error '%s' (actual errors: %s)" % (field, formset, form_index, i, err, repr(field_errors))) elif field in context[formset].forms[form_index].fields: self.fail(msg_prefix + "The field '%s' " "on formset '%s', form %d in " "context %d contains no errors" % (field, formset, form_index, i)) else: self.fail(msg_prefix + "The formset '%s', form %d in " "context %d does not contain the field '%s'" % (formset, form_index, i, field)) elif form_index is not None: non_field_errors = context[formset].forms[form_index].non_field_errors() self.assertFalse(len(non_field_errors) == 0, msg_prefix + "The formset '%s', form %d in " "context %d does not contain any non-field " "errors." % (formset, form_index, i)) self.assertTrue(err in non_field_errors, msg_prefix + "The formset '%s', form %d " "in context %d does not contain the " "non-field error '%s' " "(actual errors: %s)" % (formset, form_index, i, err, repr(non_field_errors))) else: non_form_errors = context[formset].non_form_errors() self.assertFalse(len(non_form_errors) == 0, msg_prefix + "The formset '%s' in " "context %d does not contain any " "non-form errors." 
% (formset, i)) self.assertTrue(err in non_form_errors, msg_prefix + "The formset '%s' in context " "%d does not contain the " "non-form error '%s' (actual errors: %s)" % (formset, i, err, repr(non_form_errors))) if not found_formset: self.fail(msg_prefix + "The formset '%s' was not used to render " "the response" % formset) def _assert_template_used(self, response, template_name, msg_prefix): if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " if template_name is not None and response is not None and not hasattr(response, 'templates'): raise ValueError( "assertTemplateUsed() and assertTemplateNotUsed() are only " "usable on responses fetched using the Django test Client." ) if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None # use this template with context manager return template_name, None, msg_prefix template_names = [t.name for t in response.templates if t.name is not None] return None, template_names, msg_prefix def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None): """ Asserts that the template with the provided name was used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix) if context_mgr_template: # Use assertTemplateUsed as context manager. return _AssertTemplateUsedContext(self, context_mgr_template) if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue(template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. Actual template(s) used: %s" % (template_name, ', '.join(template_names))) if count is not None: self.assertEqual(template_names.count(template_name), count, msg_prefix + "Template '%s' was expected to be rendered %d " "time(s) but was actually rendered %d time(s)." % (template_name, count, template_names.count(template_name))) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): """ Asserts that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix) if context_mgr_template: # Use assertTemplateNotUsed as context manager. return _AssertTemplateNotUsedContext(self, context_mgr_template) self.assertFalse(template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering" " the response" % template_name) def assertRaisesMessage(self, expected_exception, expected_message, callable_obj=None, *args, **kwargs): """ Asserts that the message in a raised exception matches the passed value. Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. callable_obj: Function to be called. args: Extra args. kwargs: Extra kwargs. """ return six.assertRaisesRegex(self, expected_exception, re.escape(expected_message), callable_obj, *args, **kwargs) def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value=''): """ Asserts that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. 
invalid: a dictionary mapping invalid inputs to one or more raised error messages. field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in empty_values """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **dict(field_kwargs, required=False)) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [force_text(required.error_messages['required'])] for e in required.empty_values: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({'min_length': 2, 'max_length': 20}) self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass) def assertHTMLEqual(self, html1, html2, msg=None): """ Asserts that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid HTML. """ dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 != dom2: standardMsg = '%s != %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( six.text_type(dom1).splitlines(), six.text_type(dom2).splitlines()))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Asserts that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 == dom2: standardMsg = '%s == %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count=None, msg_prefix=''): needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') real_count = haystack.count(needle) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of '%s' in response" " (expected %d)" % (real_count, needle, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) def assertJSONEqual(self, raw, expected_data, msg=None): """ Asserts that the JSON fragments raw and expected_data are equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. 
""" try: data = json.loads(raw) except ValueError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, six.string_types): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg) def assertJSONNotEqual(self, raw, expected_data, msg=None): """ Asserts that the JSON fragments raw and expected_data are not equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except ValueError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, six.string_types): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertNotEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, msg=None): """ Asserts that two XML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Asserts that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False # Subclasses can enable only a subset of apps for faster tests available_apps = None # Subclasses can define fixtures which will be automatically installed. fixtures = None # If transactions aren't available, Django will serialize the database # contents into a fixture during setup and flush and reload them # during teardown (as flush does not restore data from migrations). # This can be slow; this flag allows enabling on a per-case basis. serialized_rollback = False def _pre_setup(self): """Performs any pre-test setup. This includes: * If the class has an 'available_apps' attribute, restricting the app registry to these applications, then firing post_migrate -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, installing these fixtures. 
""" super(TransactionTestCase, self)._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send(sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=self.available_apps, enter=True) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send(sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False) raise @classmethod def _databases_names(cls, include_mirrors=True): # If the test case has a multi_db=True flag, act on all databases, # including mirrors or not. Otherwise, just on the default DB. if getattr(cls, 'multi_db', False): return [alias for alias in connections if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']] else: return [DEFAULT_DB_ALIAS] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = conn.ops.sequence_reset_by_name_sql( no_style(), conn.introspection.sequence_list()) if sql_list: with transaction.atomic(using=db_name): cursor = conn.cursor() for sql in sql_list: cursor.execute(sql) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) # If we need to provide replica initial data from migrated apps, # then do so. if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"): if self.available_apps is not None: apps.unset_available_apps() connections[db_name].creation.deserialize_db_from_string( connections[db_name]._test_serialized_contents ) if self.available_apps is not None: apps.set_available_apps(self.available_apps) if self.fixtures: # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db_name}) def _should_reload_connections(self): return True def _post_teardown(self): """Performs any post-test things. This includes: * Flushing the contents of the database, to leave a clean slate. If the class has an 'available_apps' attribute, post_migrate isn't fired. * Force-closing the connection, so the next test gets a clean cursor. """ try: self._fixture_teardown() super(TransactionTestCase, self)._post_teardown() if self._should_reload_connections(): # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does a rollback, the effect # of these statements is lost, which can affect the operation of # tests (e.g., losing a timezone setting causing objects to be # created with the wrong time). To make sure this doesn't # happen, get a clean connection at the start of every test. for conn in connections.all(): conn.close() finally: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send(sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False) def _fixture_teardown(self): # Allow TRUNCATE ... 
CASCADE and don't emit the post_migrate signal # when flushing only a subset of the apps for db_name in self._databases_names(include_mirrors=False): # Flush the database call_command('flush', verbosity=0, interactive=False, database=db_name, reset_sequences=False, allow_cascade=self.available_apps is not None, inhibit_post_migrate=self.available_apps is not None) def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None): items = six.moves.map(transform, qs) if not ordered: return self.assertEqual(Counter(items), Counter(values), msg=msg) values = list(values) # For example qs.iterator() could be passed as qs, but it does not # have 'ordered' attribute. if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered: raise ValueError("Trying to compare non-ordered queryset " "against more than one ordered values") return self.assertEqual(list(items), values, msg=msg) def assertNumQueries(self, num, func=None, *args, **kwargs): using = kwargs.pop("using", DEFAULT_DB_ALIAS) conn = connections[using] context = _AssertNumQueriesContext(self, num, conn) if func is None: return context with context: func(*args, **kwargs) def connections_support_transactions(): """ Returns True if all connections support transactions. """ return all(conn.features.supports_transactions for conn in connections.all()) class TestCase(TransactionTestCase): """ Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve test isolation. In most situation, TestCase should be prefered to TransactionTestCase as it allows faster execution. However, there are some situations where using TransactionTestCase might be necessary (e.g. testing some transactional behavior). On database backends with no transaction support, TestCase behaves as TransactionTestCase. 
""" @classmethod def _enter_atomics(cls): """Helper method to open atomic blocks for multiple databases""" atomics = {} for db_name in cls._databases_names(): atomics[db_name] = transaction.atomic(using=db_name) atomics[db_name].__enter__() return atomics @classmethod def _rollback_atomics(cls, atomics): """Rollback atomic blocks opened through the previous method""" for db_name in reversed(cls._databases_names()): transaction.set_rollback(True, using=db_name) atomics[db_name].__exit__(None, None, None) @classmethod def setUpClass(cls): super(TestCase, cls).setUpClass() if not connections_support_transactions(): return cls.cls_atomics = cls._enter_atomics() if cls.fixtures: for db_name in cls._databases_names(include_mirrors=False): try: call_command('loaddata', *cls.fixtures, **{ 'verbosity': 0, 'commit': False, 'database': db_name, }) except Exception: cls._rollback_atomics(cls.cls_atomics) raise cls.setUpTestData() @classmethod def tearDownClass(cls): if connections_support_transactions(): cls._rollback_atomics(cls.cls_atomics) for conn in connections.all(): conn.close() super(TestCase, cls).tearDownClass() @classmethod def setUpTestData(cls): """Load initial data for the TestCase""" pass def _should_reload_connections(self): if connections_support_transactions(): return False return super(TestCase, self)._should_reload_connections() def _fixture_setup(self): if not connections_support_transactions(): # If the backend does not support transactions, we should reload # class data before each test self.setUpTestData() return super(TestCase, self)._fixture_setup() assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' self.atomics = self._enter_atomics() def _fixture_teardown(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_teardown() self._rollback_atomics(self.atomics) class CheckCondition(object): """Descriptor class for deferred condition checking""" def __init__(self, cond_func): self.cond_func = cond_func def __get__(self, obj, objtype): return self.cond_func() def _deferredSkip(condition, reason): def decorator(test_func): if not (isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if condition(): raise unittest.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: # Assume a class is decorated test_item = test_func test_item.__unittest_skip__ = CheckCondition(condition) test_item.__unittest_skip_why__ = reason return test_item return decorator def skipIfDBFeature(*features): """ Skip a test if a database has at least one of the named features. """ return _deferredSkip( lambda: any(getattr(connection.features, feature, False) for feature in features), "Database has feature(s) %s" % ", ".join(features) ) def skipUnlessDBFeature(*features): """ Skip a test unless a database has all the named features. """ return _deferredSkip( lambda: not all(getattr(connection.features, feature, False) for feature in features), "Database doesn't support feature(s): %s" % ", ".join(features) ) class QuietWSGIRequestHandler(WSGIRequestHandler): """ Just a regular WSGIRequestHandler except it doesn't log to the standard output any of the requests received, so as to not clutter the output for the tests' results. """ def log_message(*args): pass class FSFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to a directory, as defined by one of the *_ROOT settings, and serves those files, publishing them under *_URL. 
""" def __init__(self, application): self.application = application self.base_url = urlparse(self.get_base_url()) super(FSFilesHandler, self).__init__() def _should_handle(self, path): """ Checks if the path should be handled. Ignores the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """ Returns the relative path to the file on disk for the given URL. """ relative_url = url[len(self.base_url[2]):] return url2pathname(relative_url) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404: pass return super(FSFilesHandler, self).get_response(request) def serve(self, request): os_rel_path = self.file_path(request.path) os_rel_path = posixpath.normpath(unquote(os_rel_path)) # Emulate behavior of django.contrib.staticfiles.views.serve() when it # invokes staticfiles' finders functionality. # TODO: Modify if/when that internal API is refactored final_rel_path = os_rel_path.replace('\\', '/').lstrip('/') return serve(request, final_rel_path, document_root=self.get_base_dir()) def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super(FSFilesHandler, self).__call__(environ, start_response) class _StaticFilesHandler(FSFilesHandler): """ Handler for serving static files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): return settings.STATIC_URL class _MediaFilesHandler(FSFilesHandler): """ Handler for serving the media files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL class LiveServerThread(threading.Thread): """ Thread for running a live http server while the tests are running. """ def __init__(self, host, possible_ports, static_handler, connections_override=None): self.host = host self.port = None self.possible_ports = possible_ports self.is_ready = threading.Event() self.error = None self.static_handler = static_handler self.connections_override = connections_override super(LiveServerThread, self).__init__() def run(self): """ Sets up the live server and databases, and then loops over handling http requests. """ if self.connections_override: # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = self.static_handler(_MediaFilesHandler(WSGIHandler())) # Go through the list of possible ports, hoping that we can find # one that is free to use for the WSGI server. for index, port in enumerate(self.possible_ports): try: self.httpd = WSGIServer( (self.host, port), QuietWSGIRequestHandler) except socket.error as e: if (index + 1 < len(self.possible_ports) and e.errno == errno.EADDRINUSE): # This port is already in use, so we go on and try with # the next one in the list. continue else: # Either none of the given ports are free or the error # is something else than "Address already in use". So # we let that error bubble up to the main thread. raise else: # A free port was found. 
self.port = port break self.httpd.set_app(handler) self.is_ready.set() self.httpd.serve_forever() except Exception as e: self.error = e self.is_ready.set() def terminate(self): if hasattr(self, 'httpd'): # Stop the WSGI server self.httpd.shutdown() self.httpd.server_close() class LiveServerTestCase(TransactionTestCase): """ Does basically the same as TransactionTestCase but also launches a live http server in a separate thread so that the tests may use another testing framework, such as Selenium for example, instead of the built-in dummy client. Note that it inherits from TransactionTestCase instead of TestCase because the threads do not share the same transactions (unless if using in-memory sqlite) and each thread needs to commit all their transactions so that the other thread can see the changes. """ static_handler = _StaticFilesHandler @property def live_server_url(self): return 'http://%s:%s' % ( self.server_thread.host, self.server_thread.port) @classmethod def setUpClass(cls): super(LiveServerTestCase, cls).setUpClass() connections_override = {} for conn in connections.all(): # If using in-memory sqlite databases, pass the connections to # the server thread. if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']): # Explicitly enable thread-shareability for this connection conn.allow_thread_sharing = True connections_override[conn.alias] = conn # Launch the live server's thread specified_address = os.environ.get( 'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081') # The specified ports may be of the form '8000-8010,8080,9200-9300' # i.e. a comma-separated list of ports or ranges of ports, so we break # it down into a detailed list of all possible ports. possible_ports = [] try: host, port_ranges = specified_address.split(':') for port_range in port_ranges.split(','): # A port range can be of either form: '8000' or '8000-8010'. extremes = list(map(int, port_range.split('-'))) assert len(extremes) in [1, 2] if len(extremes) == 1: # Port range of the form '8000' possible_ports.append(extremes[0]) else: # Port range of the form '8000-8010' for port in range(extremes[0], extremes[1] + 1): possible_ports.append(port) except Exception: msg = 'Invalid address ("%s") for live server.' % specified_address six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) cls.server_thread = LiveServerThread(host, possible_ports, cls.static_handler, connections_override=connections_override) cls.server_thread.daemon = True cls.server_thread.start() # Wait for the live server to be ready cls.server_thread.is_ready.wait() if cls.server_thread.error: # Clean up behind ourselves, since tearDownClass won't get called in # case of errors. cls._tearDownClassInternal() raise cls.server_thread.error @classmethod def _tearDownClassInternal(cls): # There may not be a 'server_thread' attribute if setUpClass() for some # reasons has raised an exception. if hasattr(cls, 'server_thread'): # Terminate the live server's thread cls.server_thread.terminate() cls.server_thread.join() # Restore sqlite in-memory database connections' non-shareability for conn in connections.all(): if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']): conn.allow_thread_sharing = False @classmethod def tearDownClass(cls): cls._tearDownClassInternal() super(LiveServerTestCase, cls).tearDownClass()
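
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of django/test/testcases.py): a minimal
# example of how a project's own tests typically use the assertions defined
# above. It assumes a normally configured Django project (settings module and
# test runner); the class and test names here are made up for illustration.

from django.test import SimpleTestCase


class ExampleAssertionTests(SimpleTestCase):
    def test_json_equal_ignores_formatting_and_key_order(self):
        # assertJSONEqual parses both arguments with the json module, so
        # whitespace and key order are irrelevant.
        self.assertJSONEqual('{"a": 1, "b": [2, 3]}', {"b": [2, 3], "a": 1})

    def test_html_equal_compares_parsed_dom(self):
        # assertHTMLEqual compares parsed HTML, so attribute order and quoting
        # style do not matter.
        self.assertHTMLEqual(
            '<p class="x" id="y">hello</p>',
            "<p id='y' class='x'>hello</p>",
        )

    def test_in_html_counts_occurrences(self):
        # assertInHTML checks that the needle occurs in the haystack the
        # expected number of times.
        self.assertInHTML("<li>one</li>", "<ul><li>one</li><li>two</li></ul>", count=1)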
# Author: Ovidiu Predescu # Date: July 2011 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unittest for the twisted-style reactor. """ import os import thread import threading import unittest try: import fcntl import twisted from twisted.internet.defer import Deferred from twisted.internet.interfaces import IReadDescriptor, IWriteDescriptor from twisted.internet.protocol import Protocol from twisted.web.client import Agent from twisted.web.resource import Resource from twisted.web.server import Site from twisted.python import log from tornado.platform.twisted import TornadoReactor from zope.interface import implements except ImportError: fcntl = None twisted = None IReadDescriptor = IWriteDescriptor = None def implements(f): pass from tornado.httpclient import AsyncHTTPClient from tornado.ioloop import IOLoop from tornado.platform.auto import set_close_exec from tornado.testing import get_unused_port from tornado.util import import_object from tornado.web import RequestHandler, Application class ReactorTestCase(unittest.TestCase): def setUp(self): self._io_loop = IOLoop() self._reactor = TornadoReactor(self._io_loop) def tearDown(self): self._io_loop.close(all_fds=True) class ReactorWhenRunningTest(ReactorTestCase): def test_whenRunning(self): self._whenRunningCalled = False self._anotherWhenRunningCalled = False self._reactor.callWhenRunning(self.whenRunningCallback) self._reactor.run() self.assertTrue(self._whenRunningCalled) self.assertTrue(self._anotherWhenRunningCalled) def whenRunningCallback(self): self._whenRunningCalled = True self._reactor.callWhenRunning(self.anotherWhenRunningCallback) self._reactor.stop() def anotherWhenRunningCallback(self): self._anotherWhenRunningCalled = True class ReactorCallLaterTest(ReactorTestCase): def test_callLater(self): self._laterCalled = False self._now = self._reactor.seconds() self._timeout = 0.001 dc = self._reactor.callLater(self._timeout, self.callLaterCallback) self.assertEqual(self._reactor.getDelayedCalls(), [dc]) self._reactor.run() self.assertTrue(self._laterCalled) self.assertTrue(self._called - self._now > self._timeout) self.assertEqual(self._reactor.getDelayedCalls(), []) def callLaterCallback(self): self._laterCalled = True self._called = self._reactor.seconds() self._reactor.stop() class ReactorTwoCallLaterTest(ReactorTestCase): def test_callLater(self): self._later1Called = False self._later2Called = False self._now = self._reactor.seconds() self._timeout1 = 0.0005 dc1 = self._reactor.callLater(self._timeout1, self.callLaterCallback1) self._timeout2 = 0.001 dc2 = self._reactor.callLater(self._timeout2, self.callLaterCallback2) self.assertTrue(self._reactor.getDelayedCalls() == [dc1, dc2] or self._reactor.getDelayedCalls() == [dc2, dc1]) self._reactor.run() self.assertTrue(self._later1Called) self.assertTrue(self._later2Called) self.assertTrue(self._called1 - self._now > self._timeout1) self.assertTrue(self._called2 - self._now > self._timeout2) self.assertEqual(self._reactor.getDelayedCalls(), []) def callLaterCallback1(self): 
self._later1Called = True self._called1 = self._reactor.seconds() def callLaterCallback2(self): self._later2Called = True self._called2 = self._reactor.seconds() self._reactor.stop() class ReactorCallFromThreadTest(ReactorTestCase): def setUp(self): super(ReactorCallFromThreadTest, self).setUp() self._mainThread = thread.get_ident() def tearDown(self): self._thread.join() super(ReactorCallFromThreadTest, self).tearDown() def _newThreadRun(self): self.assertNotEqual(self._mainThread, thread.get_ident()) if hasattr(self._thread, 'ident'): # new in python 2.6 self.assertEqual(self._thread.ident, thread.get_ident()) self._reactor.callFromThread(self._fnCalledFromThread) def _fnCalledFromThread(self): self.assertEqual(self._mainThread, thread.get_ident()) self._reactor.stop() def _whenRunningCallback(self): self._thread = threading.Thread(target=self._newThreadRun) self._thread.start() def testCallFromThread(self): self._reactor.callWhenRunning(self._whenRunningCallback) self._reactor.run() class ReactorCallInThread(ReactorTestCase): def setUp(self): super(ReactorCallInThread, self).setUp() self._mainThread = thread.get_ident() def _fnCalledInThread(self, *args, **kwargs): self.assertNotEqual(thread.get_ident(), self._mainThread) self._reactor.callFromThread(lambda: self._reactor.stop()) def _whenRunningCallback(self): self._reactor.callInThread(self._fnCalledInThread) def testCallInThread(self): self._reactor.callWhenRunning(self._whenRunningCallback) self._reactor.run() class Reader: implements(IReadDescriptor) def __init__(self, fd, callback): self._fd = fd self._callback = callback def logPrefix(self): return "Reader" def close(self): self._fd.close() def fileno(self): return self._fd.fileno() def connectionLost(self, reason): self.close() def doRead(self): self._callback(self._fd) class Writer: implements(IWriteDescriptor) def __init__(self, fd, callback): self._fd = fd self._callback = callback def logPrefix(self): return "Writer" def close(self): self._fd.close() def fileno(self): return self._fd.fileno() def connectionLost(self, reason): self.close() def doWrite(self): self._callback(self._fd) class ReactorReaderWriterTest(ReactorTestCase): def _set_nonblocking(self, fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) def setUp(self): super(ReactorReaderWriterTest, self).setUp() r, w = os.pipe() self._set_nonblocking(r) self._set_nonblocking(w) set_close_exec(r) set_close_exec(w) self._p1 = os.fdopen(r, "rb", 0) self._p2 = os.fdopen(w, "wb", 0) def tearDown(self): super(ReactorReaderWriterTest, self).tearDown() self._p1.close() self._p2.close() def _testReadWrite(self): """ In this test the writer writes an 'x' to its fd. The reader reads it, check the value and ends the test. """ self.shouldWrite = True def checkReadInput(fd): self.assertEquals(fd.read(), 'x') self._reactor.stop() def writeOnce(fd): if self.shouldWrite: self.shouldWrite = False fd.write('x') self._reader = Reader(self._p1, checkReadInput) self._writer = Writer(self._p2, writeOnce) self._reactor.addWriter(self._writer) # Test that adding the reader twice adds it only once to # IOLoop. self._reactor.addReader(self._reader) self._reactor.addReader(self._reader) def testReadWrite(self): self._reactor.callWhenRunning(self._testReadWrite) self._reactor.run() def _testNoWriter(self): """ In this test we have no writer. Make sure the reader doesn't read anything. 
""" def checkReadInput(fd): self.fail("Must not be called.") def stopTest(): # Close the writer here since the IOLoop doesn't know # about it. self._writer.close() self._reactor.stop() self._reader = Reader(self._p1, checkReadInput) # We create a writer, but it should never be invoked. self._writer = Writer(self._p2, lambda fd: fd.write('x')) # Test that adding and removing the writer leaves us with no writer. self._reactor.addWriter(self._writer) self._reactor.removeWriter(self._writer) # Test that adding and removing the reader doesn't cause # unintended effects. self._reactor.addReader(self._reader) # Wake up after a moment and stop the test self._reactor.callLater(0.001, stopTest) def testNoWriter(self): self._reactor.callWhenRunning(self._testNoWriter) self._reactor.run() # Test various combinations of twisted and tornado http servers, # http clients, and event loop interfaces. class CompatibilityTests(unittest.TestCase): def setUp(self): self.io_loop = IOLoop() self.reactor = TornadoReactor(self.io_loop) def tearDown(self): self.reactor.disconnectAll() self.io_loop.close(all_fds=True) def start_twisted_server(self): class HelloResource(Resource): isLeaf = True def render_GET(self, request): return "Hello from twisted!" site = Site(HelloResource()) self.twisted_port = get_unused_port() self.reactor.listenTCP(self.twisted_port, site, interface='127.0.0.1') def start_tornado_server(self): class HelloHandler(RequestHandler): def get(self): self.write("Hello from tornado!") app = Application([('/', HelloHandler)], log_function=lambda x: None) self.tornado_port = get_unused_port() app.listen(self.tornado_port, address='127.0.0.1', io_loop=self.io_loop) def run_ioloop(self): self.stop_loop = self.io_loop.stop self.io_loop.start() self.reactor.fireSystemEvent('shutdown') def run_reactor(self): self.stop_loop = self.reactor.stop self.stop = self.reactor.stop self.reactor.run() def tornado_fetch(self, url, runner): responses = [] client = AsyncHTTPClient(self.io_loop) def callback(response): responses.append(response) self.stop_loop() client.fetch(url, callback=callback) runner() self.assertEqual(len(responses), 1) responses[0].rethrow() return responses[0] def twisted_fetch(self, url, runner): # http://twistedmatrix.com/documents/current/web/howto/client.html chunks = [] client = Agent(self.reactor) d = client.request('GET', url) class Accumulator(Protocol): def __init__(self, finished): self.finished = finished def dataReceived(self, data): chunks.append(data) def connectionLost(self, reason): self.finished.callback(None) def callback(response): finished = Deferred() response.deliverBody(Accumulator(finished)) return finished d.addCallback(callback) def shutdown(ignored): self.stop_loop() d.addBoth(shutdown) runner() self.assertTrue(chunks) return ''.join(chunks) def testTwistedServerTornadoClientIOLoop(self): self.start_twisted_server() response = self.tornado_fetch( 'http://localhost:%d' % self.twisted_port, self.run_ioloop) self.assertEqual(response.body, 'Hello from twisted!') def testTwistedServerTornadoClientReactor(self): self.start_twisted_server() response = self.tornado_fetch( 'http://localhost:%d' % self.twisted_port, self.run_reactor) self.assertEqual(response.body, 'Hello from twisted!') def testTornadoServerTwistedClientIOLoop(self): self.start_tornado_server() response = self.twisted_fetch( 'http://localhost:%d' % self.tornado_port, self.run_ioloop) self.assertEqual(response, 'Hello from tornado!') def testTornadoServerTwistedClientReactor(self): self.start_tornado_server() 
response = self.twisted_fetch( 'http://localhost:%d' % self.tornado_port, self.run_reactor) self.assertEqual(response, 'Hello from tornado!') if twisted is None: del ReactorWhenRunningTest del ReactorCallLaterTest del ReactorTwoCallLaterTest del ReactorCallFromThreadTest del ReactorCallInThread del ReactorReaderWriterTest del CompatibilityTests else: # Import and run as much of twisted's test suite as possible. # This is unfortunately rather dependent on implementation details, # but there doesn't appear to be a clean all-in-one conformance test # suite for reactors. # # This is a list of all test suites using the ReactorBuilder # available in Twisted 11.0.0 and 11.1.0 (and a blacklist of # specific test methods to be disabled). twisted_tests = { 'twisted.internet.test.test_core.ObjectModelIntegrationTest': [], 'twisted.internet.test.test_core.SystemEventTestsBuilder': [ 'test_iterate', # deliberately not supported ], 'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [ "test_lostFileDescriptor", # incompatible with epoll and kqueue ], 'twisted.internet.test.test_process.ProcessTestsBuilder': [ # Doesn't work on python 2.5 'test_systemCallUninterruptedByChildExit', # Doesn't clean up its temp files 'test_shebang', ], 'twisted.internet.test.test_process.PTYProcessTestsBuilder': [ 'test_systemCallUninterruptedByChildExit', ], 'twisted.internet.test.test_tcp.TCPClientTestsBuilder': [], 'twisted.internet.test.test_tcp.TCPPortTestsBuilder': [], 'twisted.internet.test.test_tcp.TCPConnectionTestsBuilder': [], 'twisted.internet.test.test_tcp.WriteSequenceTests': [], 'twisted.internet.test.test_tcp.AbortConnectionTestCase': [], 'twisted.internet.test.test_threads.ThreadTestsBuilder': [], 'twisted.internet.test.test_time.TimeTestsBuilder': [], # Extra third-party dependencies (pyOpenSSL) #'twisted.internet.test.test_tls.SSLClientTestsMixin': [], 'twisted.internet.test.test_udp.UDPServerTestsBuilder': [], 'twisted.internet.test.test_unix.UNIXTestsBuilder': [ # Platform-specific. These tests would be skipped automatically # if we were running twisted's own test runner. 'test_connectToLinuxAbstractNamespace', 'test_listenOnLinuxAbstractNamespace', ], 'twisted.internet.test.test_unix.UNIXDatagramTestsBuilder': [ 'test_listenOnLinuxAbstractNamespace', ], 'twisted.internet.test.test_unix.UNIXPortTestsBuilder': [], } for test_name, blacklist in twisted_tests.iteritems(): try: test_class = import_object(test_name) except (ImportError, AttributeError): continue for test_func in blacklist: if hasattr(test_class, test_func): # The test_func may be defined in a mixin, so clobber # it instead of delattr() setattr(test_class, test_func, lambda self: None) def make_test_subclass(test_class): class TornadoTest(test_class): _reactors = ["tornado.platform.twisted._TestReactor"] def unbuildReactor(self, reactor): test_class.unbuildReactor(self, reactor) # Clean up file descriptors (especially epoll/kqueue # objects) eagerly instead of leaving them for the # GC. Unfortunately we can't do this in reactor.stop # since twisted expects to be able to unregister # connections in a post-shutdown hook. reactor._io_loop.close(all_fds=True) TornadoTest.__name__ = test_class.__name__ return TornadoTest test_subclass = make_test_subclass(test_class) globals().update(test_subclass.makeTestCaseClasses()) # Since we're not using twisted's test runner, it's tricky to get # logging set up well. 
Most of the time it's easiest to just # leave it turned off, but while working on these tests you may want # to uncomment one of the other lines instead. log.defaultObserver.stop() #import sys; log.startLogging(sys.stderr, setStdout=0) #log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0) if __name__ == "__main__": unittest.main()
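# --- Hedged usage sketch (not part of the test module above). It shows the
# pattern the CompatibilityTests exercise: one Tornado IOLoop drives both a
# Twisted Site and a Tornado Application through TornadoReactor. Port numbers
# and handler names are illustrative, and this assumes the same Tornado/Twisted
# era these tests target (tornado.platform.twisted.TornadoReactor available,
# Application.listen accepting io_loop).

from tornado.ioloop import IOLoop
from tornado.platform.twisted import TornadoReactor
from tornado.web import Application, RequestHandler
from twisted.web.resource import Resource
from twisted.web.server import Site


def _demo_shared_loop():
    io_loop = IOLoop()
    reactor = TornadoReactor(io_loop)

    class TwistedHello(Resource):
        isLeaf = True

        def render_GET(self, request):
            return "Hello from twisted!"

    class TornadoHello(RequestHandler):
        def get(self):
            self.write("Hello from tornado!")

    # Twisted server registered through the Tornado-backed reactor ...
    reactor.listenTCP(8888, Site(TwistedHello()), interface='127.0.0.1')
    # ... and a Tornado server listening on the same IOLoop.
    Application([('/', TornadoHello)]).listen(8889, address='127.0.0.1',
                                              io_loop=io_loop)

    # Starting the IOLoop drives both servers; the tests above stop the loop
    # from a fetch callback instead of running it forever.
    io_loop.start()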
""" Management of Zabbix hosts. :codeauthor: Jiri Kotlin <[email protected]> """ from collections.abc import Mapping from copy import deepcopy import salt.utils.dictdiffer import salt.utils.json def __virtual__(): """ Only make these states available if Zabbix module is available. """ if "zabbix.host_create" in __salt__: return True return (False, "zabbix module could not be loaded") def present(host, groups, interfaces, **kwargs): """ Ensures that the host exists, eventually creates new host. NOTE: please use argument visible_name instead of name to not mess with name from salt sls. This function accepts all standard host properties: keyword argument names differ depending on your zabbix version, see: https://www.zabbix.com/documentation/2.4/manual/api/reference/host/object#host .. versionadded:: 2016.3.0 :param host: technical name of the host :param groups: groupids of host groups to add the host to :param interfaces: interfaces to be created for the host :param proxy_host: Optional proxy name or proxyid to monitor host :param inventory: Optional list or dictionary of inventory names and values :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :param visible_name: Optional - string with visible name of the host, use 'visible_name' instead of 'name' parameter to not mess with value supplied from Salt sls file. :param inventory_clean: Optional - Boolean value that selects if the current inventory will be cleaned and overwritten by the declared inventory list (True); or if the inventory will be kept and only updated with inventory list contents (False). Defaults to True .. code-block:: yaml create_test_host: zabbix_host.present: - host: TestHostWithInterfaces - proxy_host: 12345 - groups: - 5 - 6 - 7 - interfaces: - test1.example.com: - ip: '192.168.1.8' - type: 'Agent' - port: 92 - testing2_create: - ip: '192.168.1.9' - dns: 'test2.example.com' - type: 'agent' - main: false - testovaci1_ipmi: - ip: '192.168.100.111' - type: 'ipmi' - inventory: - alias: some alias - asset_tag: jlm3937 """ connection_args = {} if "_connection_user" in kwargs: connection_args["_connection_user"] = kwargs.pop("_connection_user") if "_connection_password" in kwargs: connection_args["_connection_password"] = kwargs.pop("_connection_password") if "_connection_url" in kwargs: connection_args["_connection_url"] = kwargs.pop("_connection_url") ret = {"name": host, "changes": {}, "result": False, "comment": ""} # Comment and change messages comment_host_created = "Host {} created.".format(host) comment_host_updated = "Host {} updated.".format(host) comment_host_notcreated = "Unable to create host: {}. ".format(host) comment_host_exists = "Host {} already exists.".format(host) changes_host_created = { host: { "old": "Host {} does not exist.".format(host), "new": "Host {} created.".format(host), } } def _interface_format(interfaces_data): """ Formats interfaces from SLS file into valid JSON usable for zabbix API. Completes JSON with default values. 
:param interfaces_data: list of interfaces data from SLS file """ if not interfaces_data: return list() interface_attrs = ("ip", "dns", "main", "type", "useip", "port", "details") interfaces_json = salt.utils.json.loads(salt.utils.json.dumps(interfaces_data)) interfaces_dict = dict() for interface in interfaces_json: for intf in interface: intf_name = intf interfaces_dict[intf_name] = dict() for intf_val in interface[intf]: for key, value in intf_val.items(): if key in interface_attrs: interfaces_dict[intf_name][key] = value interfaces_list = list() interface_ports = { "agent": ["1", "10050"], "snmp": ["2", "161"], "ipmi": ["3", "623"], "jmx": ["4", "12345"], } for key, value in interfaces_dict.items(): # Load interface values or default values interface_type = interface_ports[value["type"].lower()][0] main = "1" if str(value.get("main", "true")).lower() == "true" else "0" useip = "1" if str(value.get("useip", "true")).lower() == "true" else "0" interface_ip = value.get("ip", "") dns = value.get("dns", key) port = str(value.get("port", interface_ports[value["type"].lower()][1])) if interface_type == "2": if not value.get("details", False): details_version = "2" details_bulk = "1" details_community = "{$SNMP_COMMUNITY}" else: val_details = {} for detail in value.get("details"): val_details.update(detail) details_version = val_details.get("version", "2") details_bulk = val_details.get("bulk", "1") details_community = val_details.get( "community", "{$SNMP_COMMUNITY}" ) details = { "version": details_version, "bulk": details_bulk, "community": details_community, } if details_version == "3": details_securitylevel = val_details.get("securitylevel", "0") details_securityname = val_details.get("securityname", "") details_contextname = val_details.get("contextname", "") details["securitylevel"] = details_securitylevel details["securityname"] = details_securityname details["contextname"] = details_contextname if int(details_securitylevel) > 0: details_authpassphrase = val_details.get("authpassphrase", "") details_authprotocol = val_details.get("authprotocol", "0") details["authpassphrase"] = details_authpassphrase details["authprotocol"] = details_authprotocol if int(details_securitylevel) > 1: details_privpassphrase = val_details.get( "privpassphrase", "" ) details_privprotocol = val_details.get("privprotocol", "0") details["privpassphrase"] = details_privpassphrase details["privprotocol"] = details_privprotocol else: details = [] interfaces_list.append( { "type": interface_type, "main": main, "useip": useip, "ip": interface_ip, "dns": dns, "port": port, "details": details, } ) interfaces_list_sorted = sorted( interfaces_list, key=lambda k: k["main"], reverse=True ) return interfaces_list_sorted interfaces_formated = _interface_format(interfaces) # Ensure groups are all groupid groupids = [] for group in groups: if isinstance(group, str): groupid = __salt__["zabbix.hostgroup_get"](name=group, **connection_args) try: groupids.append(int(groupid[0]["groupid"])) except TypeError: ret["comment"] = "Invalid group {}".format(group) return ret else: groupids.append(group) groups = groupids # Get and validate proxyid proxy_hostid = "0" if "proxy_host" in kwargs: proxy_host = kwargs.pop("proxy_host") # Test if proxy_host given as name if isinstance(proxy_host, str): try: proxy_hostid = __salt__["zabbix.run_query"]( "proxy.get", { "output": "proxyid", "selectInterface": "extend", "filter": {"host": "{}".format(proxy_host)}, }, **connection_args )[0]["proxyid"] except TypeError: ret["comment"] = "Invalid 
proxy_host {}".format(proxy_host) return ret # Otherwise lookup proxy_host as proxyid else: try: proxy_hostid = __salt__["zabbix.run_query"]( "proxy.get", {"proxyids": "{}".format(proxy_host), "output": "proxyid"}, **connection_args )[0]["proxyid"] except TypeError: ret["comment"] = "Invalid proxy_host {}".format(proxy_host) return ret # Selects if the current inventory should be substituted by the new one inventory_clean = kwargs.pop("inventory_clean", True) inventory = kwargs.pop("inventory", None) new_inventory = {} if isinstance(inventory, Mapping): new_inventory = dict(inventory) elif inventory is not None: # Create dict of requested inventory items for inv_item in inventory: for k, v in inv_item.items(): new_inventory[k] = str(v) visible_name = kwargs.pop("visible_name", None) host_extra_properties = {} if kwargs: host_properties_definition = [ "description", "inventory_mode", "ipmi_authtype", "ipmi_password", "ipmi_privilege", "ipmi_username", "status", "tls_connect", "tls_accept", "tls_issuer", "tls_subject", "tls_psk_identity", "tls_psk", ] for param in host_properties_definition: if param in kwargs: host_extra_properties[param] = kwargs.pop(param) host_exists = __salt__["zabbix.host_exists"](host, **connection_args) if host_exists: host = __salt__["zabbix.host_get"](host=host, **connection_args)[0] hostid = host["hostid"] update_host = False update_proxy = False update_hostgroups = False update_interfaces = False update_inventory = False host_updated_params = {} for param in host_extra_properties: if param in host: if host[param] == host_extra_properties[param]: continue host_updated_params[param] = host_extra_properties[param] if host_updated_params: update_host = True host_inventory_mode = host["inventory_mode"] inventory_mode = host_extra_properties.get( "inventory_mode", "0" if host_inventory_mode == "-1" else host_inventory_mode, ) cur_proxy_hostid = host["proxy_hostid"] if proxy_hostid != cur_proxy_hostid: update_proxy = True hostgroups = __salt__["zabbix.hostgroup_get"](hostids=hostid, **connection_args) cur_hostgroups = list() for hostgroup in hostgroups: cur_hostgroups.append(int(hostgroup["groupid"])) if set(groups) != set(cur_hostgroups): update_hostgroups = True hostinterfaces = __salt__["zabbix.hostinterface_get"]( hostids=hostid, **connection_args ) if hostinterfaces: hostinterfaces = sorted(hostinterfaces, key=lambda k: k["main"]) hostinterfaces_copy = deepcopy(hostinterfaces) for hostintf in hostinterfaces_copy: hostintf.pop("interfaceid") hostintf.pop("hostid") # "bulk" is present only in snmp interfaces with Zabbix < 5.0 if "bulk" in hostintf: hostintf.pop("bulk") # as we always sent the "details" it needs to be # populated in Zabbix < 5.0 response: if hostintf["type"] == "2": hostintf["details"] = { "version": "2", "bulk": "1", "community": "{$SNMP_COMMUNITY}", } else: hostintf["details"] = [] interface_diff = [ x for x in interfaces_formated if x not in hostinterfaces_copy ] + [y for y in hostinterfaces_copy if y not in interfaces_formated] if interface_diff: update_interfaces = True elif not hostinterfaces and interfaces: update_interfaces = True # if inventory param is empty leave inventory as is don't compare it # if inventory_mode is '-1', the inventory will be erased, why compare it? 
if inventory is not None and inventory_mode != "-1": cur_inventory = __salt__["zabbix.host_inventory_get"]( hostids=hostid, **connection_args ) inventory_diff = salt.utils.dictdiffer.diff(cur_inventory, new_inventory) if inventory_diff.changed(): update_inventory = True # Dry run, test=true mode if __opts__["test"]: if host_exists: if ( update_host or update_hostgroups or update_interfaces or update_proxy or update_inventory ): ret["result"] = None ret["comment"] = comment_host_updated else: ret["result"] = True ret["comment"] = comment_host_exists else: ret["result"] = None ret["comment"] = comment_host_created ret["changes"] = changes_host_created return ret error = [] if host_exists: ret["result"] = True if ( update_host or update_hostgroups or update_interfaces or update_proxy or update_inventory ): if update_host: # combine connection_args and host_updated_params sum_kwargs = deepcopy(host_updated_params) sum_kwargs.update(connection_args) hostupdate = __salt__["zabbix.host_update"](hostid, **sum_kwargs) ret["changes"]["host"] = str(host_updated_params) if "error" in hostupdate: error.append(hostupdate["error"]) if update_inventory: # combine connection_args, inventory, and clear_old sum_kwargs = deepcopy(new_inventory) sum_kwargs.update(connection_args) sum_kwargs["clear_old"] = inventory_clean sum_kwargs["inventory_mode"] = inventory_mode hostupdate = __salt__["zabbix.host_inventory_set"](hostid, **sum_kwargs) ret["changes"]["inventory"] = str(new_inventory) if "error" in hostupdate: error.append(hostupdate["error"]) if update_proxy: hostupdate = __salt__["zabbix.host_update"]( hostid, proxy_hostid=proxy_hostid, **connection_args ) ret["changes"]["proxy_hostid"] = str(proxy_hostid) if "error" in hostupdate: error.append(hostupdate["error"]) if update_hostgroups: hostupdate = __salt__["zabbix.host_update"]( hostid, groups=groups, **connection_args ) ret["changes"]["groups"] = str(groups) if "error" in hostupdate: error.append(hostupdate["error"]) if update_interfaces: interfaceid_by_type = { "1": [], # agent "2": [], # snmp "3": [], # ipmi "4": [], # jmx } other_interfaces = [] if hostinterfaces: for interface in hostinterfaces: if interface["main"]: interfaceid_by_type[interface["type"]].insert( 0, interface["interfaceid"] ) else: interfaceid_by_type[interface["type"]].append( interface["interfaceid"] ) def _update_interfaces(interface): if not interfaceid_by_type[interface["type"]]: ret = __salt__["zabbix.hostinterface_create"]( hostid, interface["ip"], dns=interface["dns"], main=interface["main"], if_type=interface["type"], useip=interface["useip"], port=interface["port"], details=interface["details"], **connection_args ) else: interfaceid = interfaceid_by_type[interface["type"]].pop(0) ret = __salt__["zabbix.hostinterface_update"]( interfaceid=interfaceid, ip=interface["ip"], dns=interface["dns"], main=interface["main"], type=interface["type"], useip=interface["useip"], port=interface["port"], details=interface["details"], **connection_args ) return ret # First we try to update the "default" interfaces every host # needs at least one "default" interface for interface in interfaces_formated: if interface["main"]: updatedint = _update_interfaces(interface) if "error" in updatedint: error.append(updatedint["error"]) else: other_interfaces.append(interface) # Second we update the other interfaces for interface in other_interfaces: updatedint = _update_interfaces(interface) if "error" in updatedint: error.append(updatedint["error"]) # And finally remove the ones that isn't in the host 
state for interface_type in interfaceid_by_type: for interfaceid in interfaceid_by_type[interface_type]: __salt__["zabbix.hostinterface_delete"]( interfaceids=interfaceid, **connection_args ) ret["changes"]["interfaces"] = str(interfaces_formated) ret["comment"] = comment_host_updated else: ret["comment"] = comment_host_exists else: # combine connection_args and host_properties sum_kwargs = host_extra_properties sum_kwargs.update(connection_args) host_create = __salt__["zabbix.host_create"]( host, groups, interfaces_formated, proxy_hostid=proxy_hostid, inventory=new_inventory, visible_name=visible_name, **sum_kwargs ) if "error" not in host_create: ret["result"] = True ret["comment"] = comment_host_created ret["changes"] = changes_host_created else: ret["result"] = False ret["comment"] = comment_host_notcreated + str(host_create["error"]) # error detected if error: ret["changes"] = {} ret["result"] = False ret["comment"] = str(error) return ret def absent(name, **kwargs): """ Ensures that the host does not exists, eventually deletes host. .. versionadded:: 2016.3.0 :param: name: technical name of the host :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml TestHostWithInterfaces: zabbix_host.absent """ ret = {"name": name, "changes": {}, "result": False, "comment": ""} # Comment and change messages comment_host_deleted = "Host {} deleted.".format(name) comment_host_notdeleted = "Unable to delete host: {}. ".format(name) comment_host_notexists = "Host {} does not exist.".format(name) changes_host_deleted = { name: { "old": "Host {} exists.".format(name), "new": "Host {} deleted.".format(name), } } connection_args = {} if "_connection_user" in kwargs: connection_args["_connection_user"] = kwargs["_connection_user"] if "_connection_password" in kwargs: connection_args["_connection_password"] = kwargs["_connection_password"] if "_connection_url" in kwargs: connection_args["_connection_url"] = kwargs["_connection_url"] host_exists = __salt__["zabbix.host_exists"](name, **connection_args) # Dry run, test=true mode if __opts__["test"]: if not host_exists: ret["result"] = True ret["comment"] = comment_host_notexists else: ret["result"] = None ret["comment"] = comment_host_deleted return ret host_get = __salt__["zabbix.host_get"](name, **connection_args) if not host_get: ret["result"] = True ret["comment"] = comment_host_notexists else: try: hostid = host_get[0]["hostid"] host_delete = __salt__["zabbix.host_delete"](hostid, **connection_args) except KeyError: host_delete = False if host_delete and "error" not in host_delete: ret["result"] = True ret["comment"] = comment_host_deleted ret["changes"] = changes_host_deleted else: ret["result"] = False ret["comment"] = comment_host_notdeleted + str(host_delete["error"]) return ret def assign_templates(host, templates, **kwargs): """ Ensures that templates are assigned to the host. .. 
versionadded:: 2017.7.0 :param host: technical name of the host :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml add_zabbix_templates_to_host: zabbix_host.assign_templates: - host: TestHost - templates: - "Template OS Linux" - "Template App MySQL" """ connection_args = {} if "_connection_user" in kwargs: connection_args["_connection_user"] = kwargs["_connection_user"] if "_connection_password" in kwargs: connection_args["_connection_password"] = kwargs["_connection_password"] if "_connection_url" in kwargs: connection_args["_connection_url"] = kwargs["_connection_url"] ret = {"name": host, "changes": {}, "result": False, "comment": ""} # Set comments comment_host_templates_updated = "Templates updated." comment_host_templ_notupdated = "Unable to update templates on host: {}.".format( host ) comment_host_templates_in_sync = "Templates already synced." update_host_templates = False curr_template_ids = list() requested_template_ids = list() hostid = "" host_exists = __salt__["zabbix.host_exists"](host, **connection_args) # Fail out if host does not exist if not host_exists: ret["result"] = False ret["comment"] = comment_host_templ_notupdated return ret host_info = __salt__["zabbix.host_get"](host=host, **connection_args)[0] hostid = host_info["hostid"] if not templates: templates = list() # Get current templateids for host host_templates = __salt__["zabbix.host_get"]( hostids=hostid, output='[{"hostid"}]', selectParentTemplates='["templateid"]', **connection_args ) for template_id in host_templates[0]["parentTemplates"]: curr_template_ids.append(template_id["templateid"]) # Get requested templateids for template in templates: try: template_id = __salt__["zabbix.template_get"]( host=template, **connection_args )[0]["templateid"] requested_template_ids.append(template_id) except TypeError: ret["result"] = False ret["comment"] = "Unable to find template: {}.".format(template) return ret # remove any duplications requested_template_ids = list(set(requested_template_ids)) if set(curr_template_ids) != set(requested_template_ids): update_host_templates = True # Set change output changes_host_templates_modified = { host: { "old": "Host templates: " + ", ".join(curr_template_ids), "new": "Host templates: " + ", ".join(requested_template_ids), } } # Dry run, test=true mode if __opts__["test"]: if update_host_templates: ret["result"] = None ret["comment"] = comment_host_templates_updated else: ret["result"] = True ret["comment"] = comment_host_templates_in_sync return ret # Attempt to perform update ret["result"] = True if update_host_templates: update_output = __salt__["zabbix.host_update"]( hostid, templates=(requested_template_ids), **connection_args ) if update_output is False: ret["result"] = False ret["comment"] = comment_host_templ_notupdated return ret ret["comment"] = comment_host_templates_updated ret["changes"] = changes_host_templates_modified else: ret["comment"] = comment_host_templates_in_sync return ret
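# --- Hedged illustration (not part of the state module above): the interface
# JSON that the _interface_format() helper defined inside present() is expected
# to build from an SLS interface list, following the defaults in the code
# (agent maps to type "1" / port 10050, snmp to type "2" / port 161, main and
# useip default to "1", dns defaults to the interface key, and snmp interfaces
# get a default "details" dict). Host names and addresses below are made up.
_example_sls_interfaces = [
    {"web1.example.com": [{"ip": "192.168.1.8"}, {"type": "agent"}]},
    {"web1_snmp": [{"ip": "192.168.1.8"}, {"type": "snmp"}, {"port": 161}]},
]
_expected_zabbix_interfaces = [
    {"type": "1", "main": "1", "useip": "1", "ip": "192.168.1.8",
     "dns": "web1.example.com", "port": "10050", "details": []},
    {"type": "2", "main": "1", "useip": "1", "ip": "192.168.1.8",
     "dns": "web1_snmp", "port": "161",
     "details": {"version": "2", "bulk": "1",
                 "community": "{$SNMP_COMMUNITY}"}},
]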
# Copyright (c) 2014 Greg James, Visual6502.org # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os, struct from array import array import params from sim6502 import Sim6502 from simTIA import SimTIA from emuPIA import EmuPIA class Sim2600Console: def __init__(self, romFilePath, sim6502factory=Sim6502, simTIAfactory= SimTIA): self.sim6507 = sim6502factory() self.simTIA = simTIAfactory() self.emuPIA = EmuPIA() self.rom = array('B', [0] * 4096) self.bankSwitchROMOffset = 0 self.programLen = 0 self.loadProgram(romFilePath) self.sim6507.resetChip() # The 6507's IRQ and NMI are connected to the supply voltage # Setting them to 'pulled high' will keep them high. self.sim6507.setPulledHigh(self.sim6507.getWireIndex('IRQ')) self.sim6507.setPulledHigh(self.sim6507.getWireIndex('NMI')) self.sim6507.recalcWireNameList(['IRQ', 'NMI']) # TIA CS1 is always high. !CS2 is always grounded self.simTIA.setPulledHigh(self.simTIA.getWireIndex('CS1')) self.simTIA.setPulledLow(self.simTIA.getWireIndex('CS2')) self.simTIA.recalcWireNameList(['CS1','CS2']) # We're running an Atari 2600 program, so set memory locations # for the console's switches and joystick state. # Console switches: # d3 set to 1 for color (vs B&W), # d1 select set to 1 for 'switch not pressed' # d0 set to 1 switch self.writeMemory(0x0282, 0x0B, True) # No joystick motion # joystick trigger buttons read on bit 7 of INPT4 and INPT5 of TIA self.writeMemory(0x0280, 0xFF, True) # Memory is mapped as follows: # 0x00 - 0x2C write to TIA # 0x30 - 0x3D read from TIA # 0x80 - 0xFF PIA RAM (128 bytes), also mapped to 0x0180 - 0x01FF for the stack # 0280 - 0297 PIA i/o ports and timer # F000 - FFFF Cartridge memory, 4kb # We handle 2k, 4k, and 8k cartridges, but only handle the bank switching # operations used by Asteroids: write to 0xFFF8 or 0xFFF9 # def readMemory(self, addr): if addr > 0x02FF and addr < 0x8000: estr = 'ERROR: 6507 ROM reading addr from 0x1000 to 0x1FFF: 0x%X'%addr print(estr) return 0 data = 0 if (addr >= 0x80 and addr <= 0xFF) or (addr >= 0x180 and addr <= 0x1FF): data = self.emuPIA.ram[(addr & 0xFF) - 0x80] elif addr >= 0x0280 and addr <= 0x0297: data = self.emuPIA.iot[addr - 0x0280] elif addr >= 0xF000 or \ (addr >= 0xD000 and addr <= 0xDFFF and self.programLen == 8192): data = self.rom[addr - 0xF000 + self.bankSwitchROMOffset] elif addr >= 0x30 and addr <= 0x3D: # This is a read from the TIA where the value is # controlled by the TIA data bus bits 6 and 7 drive-low # and drive-high gates: DB6_drvLo, DB6_drvHi, etc. 
            # This is handled below, so no need for anything here
            pass
        elif addr <= 0x2C or (addr >= 0x100 and addr <= 0x12C):
            # This happens all the time, usually at startup when
            # setting data at all writeable addresses to 0.
            msg = 'CURIOUS: Attempt to read from TIA write-only address 0x%4.4X'%(addr)
            #print(msg)
        else:
            # This can happen when the 6507 is coming out of RESET.
            # It sets the first byte of the address bus, issues a read,
            # then sets the second byte, and issues another read to get
            # the correct reset vector.
            msg = 'WARNING: Unhandled address in readMemory: 0x%4.4X'%(addr)
            print(msg)

        cpu = self.sim6507
        tia = self.simTIA

        if cpu.isHigh(cpu.padIndSYNC):
            for wireIndex in tia.dataBusDrivers:
                if tia.isHigh(wireIndex):
                    estr = 'ERROR: TIA driving DB when 6502 fetching ' + \
                           'instruction at addr 0x%X'%(addr)
                    print(estr)
        else:
            if tia.isHigh(tia.indDB6_drvLo):
                data = data & (0xFF ^ (1<<6))
            if tia.isHigh(tia.indDB6_drvHi):
                data = data | (1<<6)
            if tia.isHigh(tia.indDB7_drvLo):
                data = data & (0xFF ^ (1<<7))
            if tia.isHigh(tia.indDB7_drvHi):
                data = data | (1<<7)

        if addr & 0x200 and addr < 0x2FF:
            print('6507 READ [0x%X]: 0x%X'%(addr, data))

        cpu.setDataBusValue(data)
        cpu.recalcWireList(cpu.dataBusPads)
        return data

    def writeMemory(self, addr, byteValue, setup=False):
        cpu = self.sim6507
        tia = self.simTIA
        pia = self.emuPIA

        if cpu.isLow(cpu.padReset) and not setup:
            print('Skipping 6507 write during reset. addr: 0x%X'%(addr))
            return

        if addr >= 0xF000 and not setup:
            if self.programLen == 8192:
                if addr == 0xFFF9:
                    # switch to bank 0 which starts at 0xD000
                    self.bankSwitchROMOffset = 0x2000
                elif addr == 0xFFF8:
                    self.bankSwitchROMOffset = 0x1000
            else:
                estr = 'ERROR: 6507 writing to ROM space addr ' + \
                       '0x%4.4X data 0x%2.2X '%(addr, byteValue)
                if addr >= 0xFFF4 and addr <= 0xFFFB:
                    estr += 'This is likely a bank switch strobe we have not implemented'
                elif addr >= 0xF000 and addr <= 0xF07F:
                    estr += 'This is likely a cartridge RAM write we have not implemented'
                raise RuntimeError(estr)

        # 6502 shouldn't write to where we keep the console switches
        if (addr == 0x282 or addr == 0x280) and not setup:
            estr = 'ERROR: 6507 writing to console or joystick switches ' + \
                   'addr 0x%4.4X data 0x%2.2X'%(addr,byteValue)
            print(estr)
            return

        if addr < 0x280:
            msg = '6507 WRITE to [0x%4.4X]: 0x%2.2X at 6507 halfclock %d'% \
                  (addr, byteValue, cpu.halfClkCount)
            print(msg)

        if (addr >= 0x80 and addr <= 0xFF) or (addr >= 0x180 and addr <= 0x1FF):
            pia.ram[(addr & 0xFF) - 0x80] = byteValue
        elif addr >= 0x0280 and addr <= 0x0297:
            pia.iot[addr - 0x0280] = byteValue

            period = None
            if addr == 0x294:
                period = 1
            elif addr == 0x295:
                period = 8
            elif addr == 0x296:
                period = 64
            elif addr == 0x297:
                period = 1024

            if period != None:
                pia.timerPeriod = period
                # initial value for timer read from data bus
                pia.timerValue = cpu.getDataBusValue()
                pia.timerClockCount = 0
                pia.timerFinished = False
        #elif addr <= 0x2C:
        #    # Remember what we wrote to the TIA write-only address
        #    # This is only for bookkeeping and debugging and is not
        #    # used for simulation.
# self.simTIA.lastControlValue[addr] = byteValue def loadProgramBytes(self, progByteList, baseAddr, setResetVector): pch = baseAddr >> 8 pcl = baseAddr & 0xFF print('loadProgramBytes base addr $%2.2X%2.2X'%(pch,pcl)) romDuplicate = 1 programLen = len(progByteList) self.programLen = programLen if not programLen in [2048, 4096, 8192]: estr = 'No support for program byte list of length %d'%(programLen) raise RuntimeError(estr) if programLen == 2048: # Duplicate ROM contents so it fills all of 0xF000 - 0xFFFF romDuplicate = 2 elif programLen == 8192: self.bankSwitchROMOffset = 0x1000 self.rom = array('B', progByteList * romDuplicate) if setResetVector == True: print("Setting program's reset vector to program's base address") self.writeMemory(0xFFFC, pcl, True) self.writeMemory(0xFFFD, pch, True) else: pcl = self.readMemory(0xFFFA) pch = self.readMemory(0xFFFB) print("NMI vector: %X %X"%(pch, pcl)) pcl = self.readMemory(0xFFFC) pch = self.readMemory(0xFFFD) print("Reset vector: %X %X"%(pch, pcl)) pcl = self.readMemory(0xFFFE) pch = self.readMemory(0xFFFF) print("IRQ/BRK vector: %X %X"%(pch, pcl)) def loadProgram(self, programFilePath): if not os.path.exists(programFilePath): estr = 'ERROR: Could not find program "%s"'%(programFilePath) + \ 'from current dir %s'%(os.getcwd()) raise RuntimeError(estr) print('Setting 6502 program to ROM image %s'%(programFilePath)) self.programFilePath = programFilePath # load ROM from file of = open (programFilePath, 'rb') byteStr = of.read() of.close() program = [] progHex = '' count = 0 for byte in byteStr: intVal = struct.unpack ('1B', byte)[0] progHex += '%2.2X '%intVal count += 1 if count == 8: progHex += ' ' elif count == 16: progHex += '\n' count = 0 program.append (intVal) baseAddr = 0xF000 if len(byteStr) == 8192: print('Loading 8kb ROM starting from 0x%X'%baseAddr) elif len(byteStr) == 2048: baseAddr = 0xF800 print('Loading 2kb ROM starting from 0x%X'%baseAddr) self.loadProgramBytes(program, baseAddr, False) def updateDataBus(self): cpu = self.sim6507 tia = self.simTIA # transfer 6507 data bus to TIA # TIA DB0-DB5 are pure inputs # TIA DB6 and DB7 can be driven high or low by the TIA # TIA CS3 or CS0 high inhibits tia from driving db6 and db7 i = 0 numPads = len(cpu.dataBusPads) while i < numPads: dbPadHigh = cpu.isHigh(cpu.dataBusPads[i]) tia.setPulled(tia.dataBusPads[i], dbPadHigh) i += 1 tia.recalcWireList(tia.dataBusPads) hidrv = False for wireInd in tia.dataBusDrivers: if tia.isHigh(wireInd): hidrv = True break if hidrv: # 6502 SYNC is HIGH when its fetching instruction, so make sure # our DB is not being written to by the TIA at this time if cpu.isHigh(cpu.padIndSYNC): estr = 'ERROR: TIA driving DB when 6502 fetching instruction' #report.add (estr) print(estr) def advanceOneHalfClock(self): #D circuitSim6502, circuitSimTIA, emuPIA): cpu = self.sim6507 tia = self.simTIA pia = self.emuPIA # Set all TIA inputs to be pulled high. These aren't updated to # reflect any joystick or console switch inputs, but they could be. # To give the sim those inputs, you could check the sim halfClkCount, # and when it hits a certain value or range of values, set whatever # ins you like to low or high. # Here, we make an arbitrary choice to set the pads to be pulled # high for 10 half clocks. After this, they should remain pulled # high, so choosing 10 half clocks or N > 0 half clocks makes no # difference. 
if tia.halfClkCount < 10: for wireIndex in tia.inputPads: tia.setPulledHigh(wireIndex) tia.recalcWireList(tia.inputPads) tia.setPulledHigh(tia.padIndDEL) tia.recalcWire(tia.padIndDEL) # TIA 6x45 control ROM will change when R/W goes HI to LOW only if # the TIA CLK2 is LOW, so update R/W first, then CLK2. # R/W is high when 6502 is reading, low when 6502 is writing tia.setPulled(tia.padIndRW, cpu.isHigh(cpu.padIndRW)) tia.recalcWire(tia.padIndRW) addr = cpu.getAddressBusValue() # Transfer the state of the 6507 simulation's address bus # to the corresponding address inputs of the TIA simulation for i, tiaWireIndex in enumerate(tia.addressBusPads): padValue = cpu.isHigh(cpu.addressBusPads[i]) if cpu.isHigh(cpu.addressBusPads[i]): tia.setHigh(tiaWireIndex) else: tia.setLow(tiaWireIndex) tia.recalcWireList(tia.addressBusPads) # 6507 AB7 goes to TIA CS3 and PIA CS1 # 6507 AB12 goes to TIA CS0 and PIA CS0, but which 6502 AB line is it? # 6507 AB12, AB11, AB10 are not connected externally, so 6507 AB12 is # 6502 AB15 # # TODO: return changed/unchanged from setHigh, setLow to decide to recalc if addr > 0x7F: # It's not a TIA address, so set TIA CS3 high # Either CS3 high or CS0 high should disable TIA from writing tia.setHigh(tia.padIndCS3) tia.setHigh(tia.padIndCS0) else: # It is a TIA addr from 0x00 to 0x7F, so set CS3 and CS0 low tia.setLow(tia.padIndCS3) tia.setLow(tia.padIndCS0) tia.recalcWireList(tia.padIndsCS0CS3) self.updateDataBus() # Advance the TIA 2nd input clock that is controlled # by the 6507's clock generator. tia.setPulled(tia.padIndCLK2, cpu.isHigh(cpu.padIndCLK1Out)) tia.recalcWire(tia.padIndCLK2) #print('TIA sim num wires added to groups %d, num ant %d'% # (tia.numAddWireToGroup, tia.numAddWireTransistor)) tia.clearSimStats() # Advance TIA 'CLK0' by one half clock tia.setPulled(tia.padIndCLK0, not tia.isHigh(tia.padIndCLK0)) tia.recalcWire(tia.padIndCLK0) tia.halfClkCount += 1 # This is a good place to record the TIA and 6507 (6502) # state if you want to capture something like a logic # analyzer trace. # Transfer bits from TIA pads to 6507 pads # TIA RDY and 6507 RDY are pulled high through external resistor, so pull # the pad low if the TIA RDY_lowCtrl is on. cpu.setPulled(cpu.padIndRDY, not tia.isHigh(tia.indRDY_lowCtrl)) cpu.recalcWire(cpu.padIndRDY) # TIA sends a clock to the 6507. Propagate this clock from the # TIA simulation to the 6507 simulation. clkTo6507IsHigh = tia.isHigh(tia.padIndPH0) if clkTo6507IsHigh != cpu.isHigh(cpu.padIndCLK0): # Emulate the PIA timer # Here at Visual6502.org, we're building a gate-level model # of the PIA, but it's not ready yet. pia = self.emuPIA if clkTo6507IsHigh: # When its reached its end, it counts down from 0xFF every clock # (every time the input clock is high, it advances) if pia.timerFinished: pia.timerValue -= 1 if pia.timerValue < 0: # Assume it doesn't wrap around pia.timerValue = 0 else: pia.timerClockCount += 1 if pia.timerClockCount >= pia.timerPeriod: # decrement interval counter pia.timerValue -= 1 pia.timerClockCount = 0 if pia.timerValue < 0: pia.timerFinished = True pia.timerValue = 0xFF # Advance the 6502 simulation 1 half clock cycle if clkTo6507IsHigh: cpu.setPulledHigh(cpu.padIndCLK0) else: cpu.setPulledLow(cpu.padIndCLK0) # Put PIA count value into memory so 6507 can read it # like a regular memory read. 
self.writeMemory(0x284, pia.timerValue) cpu.recalcWire(cpu.padIndCLK0) cpu.halfClkCount += 1 addr = cpu.getAddressBusValue() if cpu.isHigh(cpu.padIndCLK0): if cpu.isLow(cpu.padIndRW): data = cpu.getDataBusValue() self.writeMemory(addr, data) else: # 6507's CLK0 is low if cpu.isHigh(cpu.padIndRW): self.readMemory(addr)
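# --- Hedged driver sketch (not part of the class above): how the console is
# intended to be stepped. Construct it with a ROM path and advance half clocks,
# optionally watching the 6507 address bus between steps. The ROM path and the
# step count are hypothetical; only methods defined above are used.
def _run_console_demo(rom_path='roms/asteroids.bin', half_clocks=100):
    console = Sim2600Console(rom_path)
    for _ in range(half_clocks):
        console.advanceOneHalfClock()
    # getAddressBusValue() is the same accessor readMemory()/writeMemory() use.
    print('6507 address bus after %d half clocks: 0x%4.4X' %
          (half_clocks, console.sim6507.getAddressBusValue()))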
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nanoemoji.colors import Color from nanoemoji.color_glyph import ColorGlyph from nanoemoji.config import FontConfig from nanoemoji.paint import * from picosvg.svg import SVG from picosvg.svg_transform import Affine2D import dataclasses import io import os import pprint import pytest import ufoLib2 # TODO test _glyph_name obeys codepoint order def _ufo(config): ufo = ufoLib2.Font() ufo.info.unitsPerEm = config.upem ufo.info.ascender = config.ascender ufo.info.descender = config.descender return ufo def _test_file(filename): return os.path.join(os.path.dirname(__file__), filename) def _nsvg(filename): return SVG.parse(_test_file(filename)).topicosvg() def _pprint(thing): stream = io.StringIO() pprint.pprint(thing, indent=2, stream=stream) return stream.getvalue() @pytest.mark.parametrize( "view_box, upem, width, ascender, descender, expected_transform, expected_width", [ # same upem, flip y ("0 0 1024 1024", 1024, 1024, 1024, 0, Affine2D(1, 0, 0, -1, 0, 1024), 1024), # noto emoji norm. scale, flip y ("0 0 128 128", 1024, 1024, 1024, 0, Affine2D(8, 0, 0, -8, 0, 1024), 1024), # noto emoji emoji_u26be.svg viewBox. Scale, flip y and translate ( "-151 297 128 128", 1024, 1024, 1024, 0, Affine2D(8, 0, 0, -8, 1208, 3400), 1024, ), # made up example. 
Scale, translate, flip y, center horizontally ( "10 11 20 21", 100, 100, 100, 0, Affine2D(a=4.761905, b=0, c=0, d=-4.761905, e=-45.238095, f=152.380952), 100, ), # noto emoji width, ascender, descender ( "0 0 1024 1024", 1024, 1275, 950, -250, Affine2D(1.171875, 0, 0, -1.171875, 37.5, 950), 1275, ), # wider than tall: uniformly scale by height and stretch advance width to fit ( "0 0 20 10", 100, 100, 100, 0, Affine2D(a=10, b=0, c=0, d=-10, e=0, f=100), 200, ), # taller than wide: uniformly scale by height, center within advance width ( "0 0 10 20", 100, 100, 100, 0, Affine2D(a=5, b=0, c=0, d=-5, e=25, f=100), 100, ), ], ) def test_transform_and_width( view_box, upem, width, ascender, descender, expected_transform, expected_width ): svg_str = ( '<svg version="1.1"' ' xmlns="http://www.w3.org/2000/svg"' f' viewBox="{view_box}"' "><defs/></svg>" ) config = FontConfig( upem=upem, width=width, ascender=ascender, descender=descender ).validate() ufo = _ufo(config) color_glyph = ColorGlyph.create( config, ufo, "duck", 1, "glyph_name", [0x0042], SVG.fromstring(svg_str) ) assert color_glyph.transform_for_font_space() == pytest.approx(expected_transform) assert ufo[color_glyph.ufo_glyph_name].width == expected_width def _round_coords(paint, prec=5): if isinstance(paint, PaintLinearGradient): return dataclasses.replace( paint, p0=Point(round(paint.p0.x, prec), round(paint.p0.y, prec)), p1=Point(round(paint.p1.x, prec), round(paint.p1.y, prec)), p2=Point(round(paint.p2.x, prec), round(paint.p2.y, prec)), ) if isinstance(paint, PaintRadialGradient): return dataclasses.replace( paint, c0=Point(round(paint.c0.x, prec), round(paint.c0.y, prec)), c1=Point(round(paint.c1.x, prec), round(paint.c1.y, prec)), r0=round(paint.r0, prec), r1=round(paint.r1, prec), ) if is_transform(paint): return transformed(paint.gettransform().round(prec), paint.paint) return paint @pytest.mark.parametrize( "svg_in, expected_paints", [ # solid ( "rect.svg", ( PaintGlyph( glyph="M2,2 L8,2 L8,4 L2,4 L2,2 Z", paint=PaintSolid(color=Color.fromstring("blue")), ), PaintGlyph( glyph="M4,4 L10,4 L10,6 L4,6 L4,4 Z", paint=PaintSolid(color=Color.fromstring("blue", alpha=0.8)), ), ), ), # linear ( "linear_gradient_rect.svg", ( PaintGlyph( glyph="M2,2 L8,2 L8,4 L2,4 L2,2 Z", paint=PaintLinearGradient( stops=( ColorStop(stopOffset=0.1, color=Color.fromstring("blue")), ColorStop( stopOffset=0.9, color=Color.fromstring("cyan", 0.8) ), ), p0=Point(200, 800), p1=Point(800, 800), p2=Point(200, 600), ), ), ), ), # radial on square using objectBoundingBox (no wrapping PaintTransform needed) ( "radial_gradient_square.svg", ( PaintGlyph( glyph="M0,0 L10,0 L10,10 L0,10 L0,0 Z", paint=PaintRadialGradient( extend=Extend.REPEAT, stops=( ColorStop( stopOffset=0.05, color=Color.fromstring("fuchsia") ), ColorStop( stopOffset=0.75, color=Color.fromstring("orange") ), ), c0=Point(500, 500), c1=Point(500, 500), r0=0, r1=500, ), ), ), ), # radial on non-square rect using objectBoundingBox ( "radial_gradient_rect.svg", ( PaintGlyph( glyph="M2,2 L8,2 L8,4 L2,4 L2,2 Z", paint=PaintScale( scaleX=1.0, scaleY=0.33333, paint=PaintRadialGradient( extend=Extend.REPEAT, stops=( ColorStop( stopOffset=0.05, color=Color.fromstring("fuchsia") ), ColorStop( stopOffset=0.75, color=Color.fromstring("orange") ), ), c0=Point(500, 2100), c1=Point(500, 2100), r0=0, r1=300, ), ), ), ), ), # radial with gradientTransform ( "radial_gradient_transform.svg", ( PaintGlyph( glyph="M0,0 L1000,0 L1000,1000 L0,1000 L0,0 Z", paint=PaintTransform( transform=(0.93969, 0.0, -0.34202, 0.93969, 
0.0, 0.0), paint=PaintRadialGradient( stops=( ColorStop( stopOffset=0.0, color=Color.fromstring("darkblue") ), ColorStop( stopOffset=0.5, color=Color.fromstring("skyblue") ), ColorStop( stopOffset=1.0, color=Color.fromstring("darkblue") ), ), c0=Point(x=733.1865, y=532.08885), c1=Point(x=733.1865, y=532.08885), r0=0, r1=532.08885, ), ), ), ), ), # linear with gradientTransform ( "linear_gradient_transform.svg", ( PaintGlyph( glyph="M0,0 L1000,0 L1000,1000 L0,1000 L0,0 Z", paint=PaintLinearGradient( extend=Extend.REFLECT, stops=( ColorStop(stopOffset=0.0, color=Color.fromstring("green")), ColorStop(stopOffset=0.5, color=Color.fromstring("white")), ColorStop(stopOffset=1.0, color=Color.fromstring("red")), ), p0=Point(x=0, y=1000), p1=Point(x=1000, y=1000), p2=Point(x=-1000, y=0), ), ), ), ), # linear with both gradientTransform and objectBoundingBox ( "linear_gradient_transform_2.svg", ( PaintGlyph( glyph="M100,450 L900,450 L900,550 L100,550 L100,450 Z", paint=PaintLinearGradient( stops=( ColorStop(stopOffset=0.05, color=Color.fromstring("gold")), ColorStop(stopOffset=0.95, color=Color.fromstring("red")), ), p0=Point(x=100, y=550), p1=Point(x=900, y=550), p2=Point(x=100, y=450), ), ), PaintGlyph( glyph="M450,100 L550,100 L550,900 L450,900 L450,100 Z", paint=PaintLinearGradient( stops=( ColorStop(stopOffset=0.05, color=Color.fromstring("gold")), ColorStop(stopOffset=0.95, color=Color.fromstring("red")), ), p0=Point(x=450, y=900), p1=Point(x=450, y=100), p2=Point(x=350, y=900), ), ), ), ), # radial with gradientTransform with almost zero scale, non-zero skew ( "radial_gradient_transform_2.svg", ( PaintGlyph( glyph=( "M51.56,22.14 C51.56,16.32 47.74,6.55 36.02,6.55 C23.9,6.55 20.18,17.89" " 20.18,22.14 C20.18,34.96 21.33,41.31 22.6,43.93 C22.84,44.43 23.56,44.66" " 23.79,43.46 C23.79,43.46 22.89,35.69 22.79,30.15 C22.77,28.86 22.37,24.06" " 25.08,23.46 C35,21.23 40.61,15.97 40.61,15.97 C42.07,19.16 46.63,22.26" " 48.27,23.45 C49.62,24.42 49.43,28.42 49.4,30.12 L48.05,43.44 C48.05,43.44" " 48.13,46.61 49.44,43.94 C50.75,41.26 51.56,26.44 51.56,22.14 Z" ), paint=PaintTransform( transform=(0.0, -1.0, 0.9288, 0.0, 0.0, 0.0), paint=PaintRadialGradient( stops=( ColorStop( stopOffset=0.0, color=Color.fromstring("white") ), ColorStop( stopOffset=1.0, color=Color.fromstring("black") ), ), c0=Point(x=-973.125, y=301.77018), c1=Point(x=-973.125, y=301.77018), r0=0.0, r1=129.01562, ), ), ), ), ), # Shape with opacity, should apply to gradient colors # See https://github.com/googlefonts/picosvg/issues/76 ( "gradient_opacity.svg", ( PaintGlyph( glyph="M2,2 L8,2 L8,4 L2,4 L2,2 Z", paint=PaintLinearGradient( stops=( ColorStop( stopOffset=0.1, color=Color.fromstring("blue", alpha=0.4), ), ColorStop( stopOffset=0.9, color=Color.fromstring("cyan", alpha=0.4 * 0.8), ), ), p0=Point(200.0, 800.0), p1=Point(800.0, 800.0), p2=Point(200.0, 600.0), ), ), PaintGlyph( glyph="M2,5 L8,5 L8,7 L2,7 L2,5 Z", paint=PaintScale( scaleX=1.0, scaleY=0.33333, paint=PaintRadialGradient( stops=( ColorStop( stopOffset=0.1, color=Color.fromstring("red", alpha=0.5), ), ColorStop( stopOffset=0.9, color=Color.fromstring("yellow", alpha=0.5 * 0.8), ), ), c0=Point(x=500.0, y=1200.0), c1=Point(x=500.0, y=1200.0), r0=0.0, r1=300.0, ), ), ), ), ), ( # viewBox="0 0 10 8" (w > h), with a linearGradient from (1, 1) to (9, 1). # The default advance width gets scaled by aspect ratio 1000 * 10/8 == 1250. 
# Test that linearGradient p0 and p1 are centered horizontally relative to # the scaled advance width (and not relative to the default advance width). "gradient_non_square_viewbox.svg", ( PaintGlyph( glyph="M1,1 L9,1 L9,7 L1,7 L1,1 Z", paint=PaintLinearGradient( stops=( ColorStop(stopOffset=0.1, color=Color.fromstring("blue")), ColorStop(stopOffset=0.9, color=Color.fromstring("cyan")), ), p0=Point(125.0, 875.0), p1=Point(1125.0, 875.0), p2=Point(125.0, 125.0), ), ), ), ), # Gradient with opacity resolves to composition of a solid color with alpha # and the layer(s) in question ( "group_opacity.svg", ( PaintGlyph( glyph="M19,11 L61,11 L61,121 L19,121 Z M29,21 L29,111 L51,111 L51,21 Z", paint=PaintSolid(color=Color(red=0, green=0, blue=0, alpha=0.8)), ), PaintComposite( mode=CompositeMode.SRC_IN, source=PaintColrLayers( layers=( PaintGlyph( glyph="M10,30 L100,30 L100,120 L10,120 L10,30 Z", paint=PaintSolid( color=Color(red=255, green=0, blue=0, alpha=1.0) ), ), PaintGlyph( glyph="M5,25 L105,25 L105,125 L5,125 Z M15,35 L15,115 L95,115 L95,35 Z", paint=PaintSolid( color=Color(red=0, green=0, blue=255, alpha=1.0) ), ), ), ), backdrop=PaintSolid(color=Color(red=0, green=0, blue=0, alpha=0.6)), ), PaintGlyph( glyph=( "M105,50 Q105,67.475 100.288,80.04 Q94.678,95 85,95 Q75.322,95 69.712,80.04 Q65,67.475" " 65,50 Q65,32.525 69.712,19.96 Q75.322,5 85,5 Q94.678,5 100.288,19.96 Q105,32.525" " 105,50 Z M95,50 Q95,34.338 90.925,23.471 Q87.748,15 85,15 Q82.252,15 79.075,23.471" " Q75,34.338 75,50 Q75,65.662 79.075,76.529 Q82.252,85 85,85 Q87.748,85 90.925,76.529" " Q95,65.662 95,50 Z" ), paint=PaintSolid(color=Color(red=0, green=0, blue=0, alpha=1.0)), ), ), ), ], ) def test_color_glyph_layers(svg_in, expected_paints): config = FontConfig(upem=1000, ascender=1000, descender=0, width=1000) color_glyph = ColorGlyph.create( config, _ufo(config), "duck", 1, "g_name", [0x0042], _nsvg(svg_in) ).mutating_traverse(_round_coords) actual_paints = color_glyph.painted_layers if actual_paints != expected_paints: print("A:") print(_pprint(actual_paints)) print("E:") print(_pprint(expected_paints)) assert actual_paints == expected_paints # TODO test that a composite is NOT formed where paint changes
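# --- Hedged walkthrough (not part of the tests above) of the first
# test_transform_and_width case: with viewBox "0 0 1024 1024", upem 1024,
# ascender 1024 and descender 0, the expected transform is
# Affine2D(1, 0, 0, -1, 0, 1024), i.e. just a y-flip placed at the ascender.
# Applying the SVG matrix convention (a, b, c, d, e, f) by hand,
# x' = a*x + c*y + e and y' = b*x + d*y + f (the same convention the other
# parametrized cases imply), the SVG top-left corner lands at y=1024 and the
# SVG bottom edge lands at y=0.
def _apply_affine(a, b, c, d, e, f, x, y):
    return (a * x + c * y + e, b * x + d * y + f)


assert _apply_affine(1, 0, 0, -1, 0, 1024, 0, 0) == (0, 1024)    # SVG origin -> ascender
assert _apply_affine(1, 0, 0, -1, 0, 1024, 0, 1024) == (0, 0)    # SVG bottom -> descender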
__author__ = 'Copyright (c) 2013-15 Alan Yorinks All rights reserved.' """ Copyright (c) 2013-15 Alan Yorinks All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ from collections import deque import threading import sys import time from .pymata_serial import PyMataSerial from .pymata_command_handler import PyMataCommandHandler # For report data formats refer to http://firmata.org/wiki/Protocol # noinspection PyPep8 class PyMata: """ This class contains the complete set of API methods that permit control of an Arduino Micro-Controller utilizing Firmata or its derivatives. For information about the Firmata protocol, refer to: http://firmata.org/wiki/Protocol """ # some state variables HIGH = 1 # digital pin state high value LOW = 0 # digital pin state low value REPORTING_ENABLE = 1 # enable reporting for REPORT_ANALOG or REPORT_DIGITAL message sent to firmata REPORTING_DISABLE = 0 # disable reporting for REPORT_ANALOG or REPORT_DIGITAL message sent to firmata # Shared Resources - data structures, controlling mechanisms, and reference variables # Commands and data received from Firmata via the serial interface are placed into the command deque. # The pymata_command_handler class removes and processes this information. command_deque = deque() # This is the instance reference to the communications port object arduino = None # This is a thread lock to assure data integrity when reading or writing to the data response tables # (defined in the CommandHandler class). It shared by the pymata class and the pymata_command_handler class. 
data_lock = threading.RLock() # This is the instance reference to the _command_handler _command_handler = None # verbose can be set to false to suppress output to the console when instantiating PyMata verbose = True # pin modes INPUT = 0x00 # pin set as input OUTPUT = 0x01 # pin set as output ANALOG = 0x02 # analog pin in analogInput mode PWM = 0x03 # digital pin in PWM output mode SERVO = 0x04 # digital pin in Servo output mode I2C = 0x06 # pin included in I2C setup ONEWIRE = 0x07 # possible future feature STEPPER = 0x08 # any pin in stepper mode TONE = 0x09 # Any pin in TONE mode ENCODER = 0x0a SONAR = 0x0b # Any pin in SONAR mode IGNORE = 0x7f LATCH_MODE = 0xE0 # this value is or'ed with pin modes for latched data callback # the following pin modes are not part of or defined by Firmata # but used by PyMata DIGITAL = 0x20 # I2C command operation modes I2C_WRITE = 0B00000000 I2C_READ = 0B00001000 I2C_READ_CONTINUOUSLY = 0B00010000 I2C_STOP_READING = 0B00011000 I2C_READ_WRITE_MODE_MASK = 0B00011000 # Tone commands TONE_TONE = 0 # play a tone TONE_NO_TONE = 1 # turn off tone # Stepper Motor Sub-commands STEPPER_CONFIGURE = 0 # configure a stepper motor for operation STEPPER_STEP = 1 # command a motor to move at the provided speed STEPPER_LIBRARY_VERSION = 2 # used to get stepper library version number # each byte represents a digital port and its value contains the current port settings digital_output_port_pins = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] # noinspection PyPep8Naming def __init__(self, port_id='/dev/ttyACM0', bluetooth=False, verbose=True): """ The "constructor" instantiates the entire interface. It starts the operational threads for the serial interface as well as for the command handler. @param port_id: Communications port specifier (COM3, /dev/ttyACM0, etc) @param bluetooth: Sets start up delays for bluetooth connectivity. Set to False for faster start up. @param verbose: If set to False, the status print statements are suppressed. """ # Currently only serial communication over USB is supported, but in the future # wifi and other transport mechanism support is anticipated try: # save the user's request if specified self.verbose = verbose if self.verbose: print("\nPython Version %s" % sys.version) print('\nPyMata version 2.13 Copyright(C) 2013-16 Alan Yorinks All rights reserved.') # Instantiate the serial support class self.transport = PyMataSerial(port_id, self.command_deque) # wait for HC-06 Bluetooth slave to initialize in case it is being used. if bluetooth: time.sleep(5) # Attempt opening communications with the Arduino micro-controller self.transport.open(self.verbose) # additional wait for HC-06 if it is being used if bluetooth: time.sleep(2) else: # necessary to support Arduino Mega time.sleep(1) # Start the data receive thread self.transport.start() # Instantiate the command handler self._command_handler = PyMataCommandHandler(self) self._command_handler.system_reset() ######################################################################## # constants defined locally from values contained in the command handler ######################################################################## # Data latch state constants to be used when accessing data returned from get_latch_data methods. 
# The get_latch data methods return [pin_number, latch_state, latched_data, time_stamp] # These three constants define possible values for the second item in the list, latch_state # this pin will be ignored for latching - table initialized with this value self.LATCH_IGNORE = self._command_handler.LATCH_IGNORE # When the next pin value change is received for this pin, if it matches the latch criteria # the data will be latched. self.LATCH_ARMED = self._command_handler.LATCH_ARMED # Data has been latched. Read the data to re-arm the latch. self.LATCH_LATCHED = self._command_handler.LATCH_LATCHED # # These constants are used when setting a data latch. # Latch threshold types # self.DIGITAL_LATCH_HIGH = self._command_handler.DIGITAL_LATCH_HIGH self.DIGITAL_LATCH_LOW = self._command_handler.DIGITAL_LATCH_LOW self.ANALOG_LATCH_GT = self._command_handler.ANALOG_LATCH_GT self.ANALOG_LATCH_LT = self._command_handler.ANALOG_LATCH_LT self.ANALOG_LATCH_GTE = self._command_handler.ANALOG_LATCH_GTE self.ANALOG_LATCH_LTE = self._command_handler.ANALOG_LATCH_LTE # constants to be used to parse the data returned from calling # get_X_latch_data() self.LATCH_PIN = 0 self.LATCH_STATE = 1 self.LATCHED_DATA = 2 self.LATCHED_TIME_STAMP = 3 # Start the command processing thread self._command_handler.start() # Command handler should now be prepared to receive replies from the Arduino, so go ahead # detect the Arduino board if self.verbose: print('\nPlease wait while Arduino is being detected. This can take up to 30 seconds ...') # perform board auto discovery if not self._command_handler.auto_discover_board(self.verbose): # board was not found so shutdown if self.verbose: print("Board Auto Discovery Failed!, Shutting Down") self._command_handler.stop() self.transport.stop() self._command_handler.join() self.transport.join() time.sleep(2) except KeyboardInterrupt: if self.verbose: print("Program Aborted Before PyMata Instantiated") sys.exit() def analog_mapping_query(self): """ Send an analog mapping query message via sysex. Client retrieves the results with a call to get_analog_mapping_request_results() """ self._command_handler.send_sysex(self._command_handler.ANALOG_MAPPING_QUERY, None) def analog_read(self, pin): """ Retrieve the last analog data value received for the specified pin. @param pin: Selected pin @return: The last value entered into the analog response table. """ with self.data_lock: data = self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE] return data def analog_write(self, pin, value): """ Set the specified pin to the specified value. @param pin: Pin number @param value: Pin value @return: No return value """ if self._command_handler.ANALOG_MESSAGE + pin < 0xf0: command = [self._command_handler.ANALOG_MESSAGE + pin, value & 0x7f, (value >> 7) & 0x7f] self._command_handler.send_command(command) else: self.extended_analog(pin, value) def capability_query(self): """ Send a Firmata capability query message via sysex. Client retrieves the results with a call to get_capability_query_results() The Arduino can be rather slow in responding to this command. For the Mega 2560 R3 it has taken up to 25 seconds for a response. """ self._command_handler.send_sysex(self._command_handler.CAPABILITY_QUERY, None) def close(self): """ This method will close the transport (serial port) and exit @return: No return value, but sys.exit(0) is called. 
""" self._command_handler.system_reset() self._command_handler.stop() self.transport.stop() self.transport.close() if self.verbose: print("PyMata close(): Calling sys.exit(0): Hope to see you soon!") sys.exit(0) def digital_read(self, pin): """ Retrieve the last digital data value received for the specified pin. NOTE: This command will return values for digital, pwm, etc, pin types @param pin: Selected pin @return: The last value entered into the digital response table. """ with self.data_lock: data = \ self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE] return data def digital_write(self, pin, value): """ Set the specified pin to the specified value. @param pin: pin number @param value: pin value @return: No return value """ # The command value is not a fixed value, but needs to be calculated using the # pin's port number # # port = pin // 8 calculated_command = self._command_handler.DIGITAL_MESSAGE + port mask = 1 << (pin % 8) # Calculate the value for the pin's position in the port mask if value == 1: self.digital_output_port_pins[port] |= mask else: self.digital_output_port_pins[port] &= ~mask # Assemble the command command = (calculated_command, self.digital_output_port_pins[port] & 0x7f, (self.digital_output_port_pins[port] >> 7) & 0x7f) self._command_handler.send_command(command) def disable_analog_reporting(self, pin): """ Disables analog reporting for a single analog pin. @param pin: Analog pin number. For example for A0, the number is 0. @return: No return value """ command = [self._command_handler.REPORT_ANALOG + pin, self.REPORTING_DISABLE] self._command_handler.send_command(command) def disable_digital_reporting(self, pin): """ Disables digital reporting. By turning reporting off for this pin, reporting is disabled for all 8 bits in the "port" - @param pin: Pin and all pins for this port @return: No return value """ port = pin // 8 command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_DISABLE] self._command_handler.send_command(command) def enable_analog_reporting(self, pin): """ Enables analog reporting. By turning reporting on for a single pin, @param pin: Analog pin number. For example for A0, the number is 0. @return: No return value """ command = [self._command_handler.REPORT_ANALOG + pin, self.REPORTING_ENABLE] self._command_handler.send_command(command) def enable_digital_reporting(self, pin): """ Enables digital reporting. By turning reporting on for all 8 bits in the "port" - this is part of Firmata's protocol specification. @param pin: Pin and all pins for this port @return: No return value """ port = pin // 8 command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_ENABLE] self._command_handler.send_command(command) def encoder_config(self, pin_a, pin_b, cb=None): """ This command enables the rotary encoder (2 pin + ground) and will enable encoder reporting. NOTE: This command is not currently part of standard arduino firmata, but is provided for legacy support of CodeShield on an Arduino UNO. Encoder data is retrieved by performing a digital_read from pin a (encoder pin 1) @param pin_a: Encoder pin 1. @param pin_b: Encoder pin 2. 
        @param cb: callback function to report encoder changes

        @return: No return value
        """
        data = [pin_a, pin_b]
        self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_MODE] \
            = self.ENCODER
        self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
        self.enable_digital_reporting(pin_a)

        self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_MODE] \
            = self.ENCODER
        self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
        self.enable_digital_reporting(pin_b)

        self._command_handler.send_sysex(self._command_handler.ENCODER_CONFIG, data)

    def extended_analog(self, pin, data):
        """
        This method will send an extended data analog output command to the selected pin.

        @param pin: 0 - 127
        @param data: 0 - 0xfffff
        """
        analog_data = [pin, data & 0x7f, (data >> 7) & 0x7f, (data >> 14) & 0x7f]
        self._command_handler.send_sysex(self._command_handler.EXTENDED_ANALOG, analog_data)

    def get_analog_latch_data(self, pin):
        """
        A list is returned containing the latch state for the pin, the latched value, and the time stamp
        [pin_num, latch_state, latched_value, time_stamp]

        If the latch state is LATCH_LATCHED, the table is reset (data and timestamp set to zero).

        @param pin: Pin number.
        @return: [pin, latch_state, latch_data_value, time_stamp]
        """
        return self._command_handler.get_analog_latch_data(pin)

    def get_analog_mapping_request_results(self):
        """
        Call this method after calling analog_mapping_query() to retrieve its results.

        @return: raw data returned by firmata
        """
        return self._command_handler.analog_mapping_query_results

    def get_analog_response_table(self):
        """
        This method returns a list of lists representing the current pin mode and
        associated data values for all analog pins.
        All configured pin types, both input and output will be listed. Output pin data will contain zero.

        @return: The last update of the analog response table
        """
        return self._command_handler.get_analog_response_table()

    def get_encoder_response_table(self):
        return self._command_handler.get_encoder_response_table()

    def get_imu_response_table(self):
        return self._command_handler.get_imu_response_table()

    def get_imu_init_response_table(self):
        return self._command_handler.get_imu_init_response_table()

    def get_one_servo_response_table(self):
        return self._command_handler.get_one_servo_response_table()

    def get_capability_query_results(self):
        """
        Retrieve the data returned by a previous call to capability_query().

        @return: Raw capability data returned by firmata
        """
        return self._command_handler.capability_query_results

    def get_digital_latch_data(self, pin):
        """
        A list is returned containing the latch state for the pin, the latched value, and the time stamp
        [pin_num, latch_state, latched_value, time_stamp]

        If the latch state is LATCH_LATCHED, the table is reset (data and timestamp set to zero).

        @param pin: Pin number.
        @return: [pin, latch_state, latch_data_value, time_stamp]
        """
        return self._command_handler.get_digital_latch_data(pin)

    def get_digital_response_table(self):
        """
        This method returns a list of lists representing the current pin mode
        and associated data for all digital pins.
        All pin types, both input and output will be listed. Output pin data will contain zero.
@return: The last update of the digital response table """ return self._command_handler.get_digital_response_table() def get_firmata_version(self): """ Retrieve the firmata version information returned by a previous call to refresh_report_version() @return: Firmata_version list [major, minor] or None """ return self._command_handler.firmata_version def get_firmata_firmware_version(self): """ Retrieve the firmware id information returned by a previous call to refresh_report_firmware() @return: Firmata_firmware list [major, minor, file_name] or None """ return self._command_handler.firmata_firmware def get_pin_state_query_results(self): """ This method returns the results of a previous call to pin_state_query() and then resets the pin state query data to None @return: Raw pin state query data """ r_data = self._command_handler.last_pin_query_results self._command_handler.last_pin_query_results = [] return r_data # noinspection PyMethodMayBeStatic def get_pymata_version(self): """ Returns the PyMata version number in a list: [Major Number, Minor Number] @return: """ return ['2', '08'] # noinspection PyMethodMayBeStatic def get_sonar_data(self): """ Retrieve Ping (HC-SR04 type) data. The data is presented as a dictionary. The 'key' is the trigger pin specified in sonar_config() and the 'data' is the current measured distance (in centimeters) for that pin. If there is no data, the value is set to IGNORE (127). @return: active_sonar_map """ return self._command_handler.active_sonar_map def get_stepper_version(self, timeout=20): """ @param timeout: specify a time to allow arduino to process and return a version @return: the stepper version number if it was set. """ # get current time start_time = time.time() # wait for up to 20 seconds for a successful capability query to occur while self._command_handler.stepper_library_version <= 0: if time.time() - start_time > timeout: if self.verbose is True: print("Stepper Library Version Request timed-out. " "Did you send a stepper_request_library_version command?") return else: pass return self._command_handler.stepper_library_version def i2c_config(self, read_delay_time=0, pin_type=None, clk_pin=0, data_pin=0): """ NOTE: THIS METHOD MUST BE CALLED BEFORE ANY I2C REQUEST IS MADE This method initializes Firmata for I2c operations. It allows setting of a read time delay amount, and to optionally track the pins as I2C in the appropriate response table. To track pins: Set the pin_type to ANALOG or DIGITAL and provide the pin numbers. If using ANALOG, pin numbers use the analog number, for example A4: use 4. @param read_delay_time: an optional parameter, default is 0 @param pin_type: ANALOG or DIGITAL to select response table type to track pin numbers @param clk_pin: pin number (see comment above). @param data_pin: pin number (see comment above). 
@return: No Return Value """ data = [read_delay_time & 0x7f, (read_delay_time >> 7) & 0x7f] self._command_handler.send_sysex(self._command_handler.I2C_CONFIG, data) # If pin type is set, set pin mode in appropriate response table for these pins if pin_type: if pin_type == self.DIGITAL: self._command_handler.digital_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE] \ = self.I2C self._command_handler.digital_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE] \ = self.I2C else: self._command_handler.analog_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE] \ = self.I2C self._command_handler.analog_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE] \ = self.I2C def i2c_read(self, address, register, number_of_bytes, read_type, cb=None): """ This method requests the read of an i2c device. Results are retrieved by a call to i2c_get_read_data(). If a callback method is provided, when data is received from the device it will be sent to the callback method @param address: i2c device address @param register: register number (can be set to zero) @param number_of_bytes: number of bytes expected to be returned @param read_type: I2C_READ or I2C_READ_CONTINUOUSLY @param cb: Optional callback function to report i2c data as result of read command """ data = [address, read_type, register & 0x7f, (register >> 7) & 0x7f, number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f] # add or update entry in i2c_map for reply self._command_handler.i2c_map[address] = [cb, None] self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data) def i2c_write(self, address, *args): """ Write data to an i2c device. @param address: i2c device address @param args: A variable number of bytes to be sent to the device """ data = [address, self.I2C_WRITE] for item in args: data.append(item & 0x7f) data.append((item >> 7) & 0x7f) self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data) def i2c_stop_reading(self, address): """ This method stops an I2C_READ_CONTINUOUSLY operation for the i2c device address specified. @param address: address of i2c device """ data = [address, self.I2C_STOP_READING] self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data) def i2c_get_read_data(self, address): """ This method retrieves the i2c read data as the result of an i2c_read() command. @param address: i2c device address @return: raw data read from device """ if address in self._command_handler.i2c_map: map_entry = self._command_handler.i2c_map[address] return map_entry[1] def pin_state_query(self, pin): """ This method issues a pin state query command. Data returned is retrieved via a call to get_pin_state_query_results() @param pin: pin number """ self._command_handler.send_sysex(self._command_handler.PIN_STATE_QUERY, [pin]) def play_tone(self, pin, tone_command, frequency, duration): """ This method will call the Tone library for the selected pin. If the tone command is set to TONE_TONE, then the specified tone will be played. Else, if the tone command is TONE_NO_TONE, then any currently playing tone will be disabled. 
It is intended for a future release of Arduino Firmata @param pin: Pin number @param tone_command: Either TONE_TONE, or TONE_NO_TONE @param frequency: Frequency of tone @param duration: Duration of tone in milliseconds @return: No return value """ # convert the integer values to bytes if tone_command == self.TONE_TONE: # duration is specified if duration: data = [tone_command, pin, frequency & 0x7f, (frequency >> 7) & 0x7f, duration & 0x7f, (duration >> 7) & 0x7f] else: data = [tone_command, pin, frequency & 0x7f, (frequency >> 7) & 0x7f, 0, 0] self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = \ self.TONE # turn off tone else: data = [tone_command, pin] self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data) def refresh_report_version(self): """ This method will query firmata for the report version. Retrieve the report version via a call to get_firmata_version() """ command = [self._command_handler.REPORT_VERSION] self._command_handler.send_command(command) def refresh_report_firmware(self): """ This method will query firmata to report firmware. Retrieve the report via a call to get_firmata_firmware_version() """ self._command_handler.send_sysex(self._command_handler.REPORT_FIRMWARE, None) def reset(self): """ This command sends a reset message to the Arduino. The response tables will be reinitialized @return: No return value. """ # set all output pins to a value of 0 for pin in range(0, self._command_handler.total_pins_discovered): if self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE] \ == self.PWM: self.analog_write(pin, 0) elif self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE] \ == self.SERVO: self.analog_write(pin, 0) elif self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE] \ == self.TONE: data = [self.TONE_NO_TONE, pin] self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data) else: self.digital_write(pin, 0) self._command_handler.system_reset() def set_analog_latch(self, pin, threshold_type, threshold_value, cb=None): """ This method "arms" an analog pin for its data to be latched and saved in the latching table If a callback method is provided, when latching criteria is achieved, the callback function is called with latching data notification. In that case, the latching table is not updated. @param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5 @param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE @param threshold_value: numerical value - between 0 and 1023 @param cb: callback method @return: True if successful, False if parameter data is invalid """ if self.ANALOG_LATCH_GT <= threshold_type <= self.ANALOG_LATCH_LTE: if 0 <= threshold_value <= 1023: self._command_handler.set_analog_latch(pin, threshold_type, threshold_value, cb) return True else: return False def set_digital_latch(self, pin, threshold_type, cb=None): """ This method "arms" a digital pin for its data to be latched and saved in the latching table If a callback method is provided, when latching criteria is achieved, the callback function is called with latching data notification. In that case, the latching table is not updated. 
        @param pin: Digital pin number

        @param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW

        @param cb: callback function

        @return: True if successful, False if parameter data is invalid
        """
        if 0 <= threshold_type <= 1:
            self._command_handler.set_digital_latch(pin, threshold_type, cb)
            return True
        else:
            return False

    def set_pin_mode(self, pin, mode, pin_type, cb=None):
        """
        This method sets a pin to the desired pin mode for the pin_type.
        It automatically enables data reporting.
        NOTE: DO NOT CALL THIS METHOD FOR I2C. See i2c_config().

        @param pin: Pin number (for analog use the analog number, for example A4: use 4)
        @param mode: INPUT, OUTPUT, PWM
        @param pin_type: ANALOG or DIGITAL
        @param cb: This is an optional callback function to report data changes to the user

        @return: No return value
        """
        print("SET_PIN_MODE")
        command = [self._command_handler.SET_PIN_MODE, pin, mode]
        self._command_handler.send_command(command)

        # enable reporting for input pins
        if mode == self.INPUT:
            if pin_type == self.ANALOG:

                # set analog response table to show this pin is an input pin
                self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = \
                    self.INPUT
                self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
                self.enable_analog_reporting(pin)

            # if not analog it has to be digital
            else:
                self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = \
                    self.INPUT
                self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
                self.enable_digital_reporting(pin)

        else:  # must be output - so set the tables accordingly
            if pin_type == self.ANALOG:
                self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = mode
            else:
                self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = mode

    def set_sampling_interval(self, interval):
        """
        This method sends the desired sampling interval to Firmata.
        Note: Standard Firmata will ignore any interval less than 10 milliseconds.

        @param interval: Integer value for desired sampling interval in milliseconds

        @return: No return value.
        """
        data = [interval & 0x7f, (interval >> 7) & 0x7f]
        self._command_handler.send_sysex(self._command_handler.SAMPLING_INTERVAL, data)

    def set_encoder_mode(self, enable_disable, module, mode):
        command = [enable_disable, module, mode]
        self._command_handler.send_sysex(self._command_handler.ENCODER_CONFIG, command)

    def servo_config(self, pin, min_pulse=450, max_pulse=2800):
        """
        Configure a pin as a servo pin. Set pulse min, max in ms.

        @param pin: Servo Pin.
        @param min_pulse: Min pulse width in ms.
        @param max_pulse: Max pulse width in ms.

        @return: No return value
        """
        # self.set_pin_mode(pin, self.SERVO, self.OUTPUT)
        command = [pin, min_pulse & 0x7f, (min_pulse >> 7) & 0x7f, max_pulse & 0x7f, (max_pulse >> 7) & 0x7f]
        self._command_handler.send_sysex(self._command_handler.SERVO_CONFIG, command)

    def move_one_servo_ex(self, pin, angle, mode, msec):
        """
        This method is added for the 86Duino Servo library.
        """
        # self.set_pin_mode(pin, self.SERVO, self.OUTPUT)
        data = [pin, angle & 0x7f, angle >> 7, mode, msec & 0x7F, msec >> 7]
        self._command_handler.send_sysex(self._command_handler.MOVE_ONE_SERVO, data)

    def config_servo_ex(self, pin, angle, mode, msec):
        """
        This method is added for the 86Duino Servo library.
""" # self.set_pin_mode(pin, self.SERVO, self.OUTPUT) data = [pin, angle & 0x7f, angle >> 7, mode, msec & 0x7F, msec >> 7] self._command_handler.send_sysex(self._command_handler.SERVO_CONFIG_EX, data) def test_data_query(self): """ This method is added dor 86Duino Servo library by Acen. """ # self.set_pin_mode(pin, self.SERVO, self.OUTPUT) self._command_handler.send_sysex(self._command_handler.TEST_DATA_QUERY) def move_servo_all(self): """ This method is added dor 86Duino Servo library by Acen. """ # self.set_pin_mode(pin, self.SERVO, self.OUTPUT) self._command_handler.send_sysex(self._command_handler.SERVO_MOVING_ALL) def init_imu(self, id): """ This method is added dor 86Duino Servo library by Acen. """ # self.set_pin_mode(pin, self.SERVO, self.OUTPUT) data = [id & 0x7F, id >> 7] self._command_handler.send_sysex(self._command_handler.ENABLE_IMU, data) def sonar_config(self, trigger_pin, echo_pin, cb=None, ping_interval=50, max_distance=200): """ Configure the pins,ping interval and maximum distance for an HC-SR04 type device. Single pin configuration may be used. To do so, set both the trigger and echo pins to the same value. Up to a maximum of 6 SONAR devices is supported If the maximum is exceeded a message is sent to the console and the request is ignored. NOTE: data is measured in centimeters @param trigger_pin: The pin number of for the trigger (transmitter). @param echo_pin: The pin number for the received echo. @param ping_interval: Minimum interval between pings. Lowest number to use is 33 ms.Max is 127 @param max_distance: Maximum distance in cm. Max is 200. @param cb: optional callback function to report sonar data changes """ if max_distance > 200: max_distance = 200 max_distance_lsb = max_distance & 0x7f max_distance_msb = (max_distance >> 7) & 0x7f data = [trigger_pin, echo_pin, ping_interval, max_distance_lsb, max_distance_msb] self.set_pin_mode(trigger_pin, self.SONAR, self.INPUT) self.set_pin_mode(echo_pin, self.SONAR, self.INPUT) # update the ping data map for this pin if len(self._command_handler.active_sonar_map) > 6: if self.verbose: print("sonar_config: maximum number of devices assigned - ignoring request") return else: with self.data_lock: # self._command_handler.active_sonar_map[trigger_pin] = self.IGNORE self._command_handler.active_sonar_map[trigger_pin] = [cb, [self.IGNORE]] self._command_handler.send_sysex(self._command_handler.SONAR_CONFIG, data) def stepper_config(self, steps_per_revolution, stepper_pins): """ Configure stepper motor prior to operation. 
@param steps_per_revolution: number of steps per motor revolution @param stepper_pins: a list of control pin numbers - either 4 or 2 """ data = [self.STEPPER_CONFIGURE, steps_per_revolution & 0x7f, (steps_per_revolution >> 7) & 0x7f] for pin in range(len(stepper_pins)): data.append(stepper_pins[pin]) self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data) def stepper_step(self, motor_speed, number_of_steps): """ Move a stepper motor for the number of steps at the specified speed @param motor_speed: 21 bits of data to set motor speed @param number_of_steps: 14 bits for number of steps & direction positive is forward, negative is reverse """ if number_of_steps > 0: direction = 1 else: direction = 0 abs_number_of_steps = abs(number_of_steps) data = [self.STEPPER_STEP, motor_speed & 0x7f, (motor_speed >> 7) & 0x7f, (motor_speed >> 14) & 0x7f, abs_number_of_steps & 0x7f, (abs_number_of_steps >> 7) & 0x7f, direction] self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data) def stepper_request_library_version(self): """ Request the stepper library version from the Arduino. To retrieve the version after this command is called, call get_stepper_version """ data = [self.STEPPER_LIBRARY_VERSION] self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data) def get_perform_motion_response_table(self): return self._command_handler.get_perform_motion_response_table() def perform_motion(self, id, motion, times): data = [id & 0x7F, id >> 7, motion & 0x7F, motion >> 7, times & 0x7f, times >> 7] self._command_handler.send_sysex(self._command_handler.PERFORM_MOTION, data)
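

# Usage sketch (illustrative, not part of the library): a minimal example of
# driving the PyMata API defined above. It assumes an Arduino running a
# FirmataPlus-compatible sketch is attached at /dev/ttyACM0; the pin numbers
# are only examples.
if __name__ == "__main__":
    board = PyMata("/dev/ttyACM0", verbose=True)

    # Blink the LED on digital pin 13 a few times.
    board.set_pin_mode(13, board.OUTPUT, board.DIGITAL)
    for _ in range(5):
        board.digital_write(13, 1)
        time.sleep(0.5)
        board.digital_write(13, 0)
        time.sleep(0.5)

    # Sample analog input A2; set_pin_mode() enables reporting automatically.
    board.set_pin_mode(2, board.INPUT, board.ANALOG)
    time.sleep(1)
    print("A2 reads %d" % board.analog_read(2))

    # close() resets the board and then calls sys.exit(0).
    board.close()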
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import logging import time from django.utils.translation import ugettext as _ from desktop.lib.exceptions_renderable import PopupException from hadoop import cluster from hadoop.fs.hadoopfs import Hdfs from liboozie.oozie_api import get_oozie from liboozie.conf import REMOTE_DEPLOYMENT_DIR LOG = logging.getLogger(__name__) class Submission(object): """ Represents one unique Oozie submission. Actions are: - submit - rerun """ def __init__(self, user, job=None, fs=None, jt=None, properties=None, oozie_id=None): self.job = job self.user = user self.fs = fs self.jt = jt self.oozie_id = oozie_id if properties is not None: self.properties = properties else: self.properties = {} def __str__(self): if self.oozie_id: res = "Submission for job '%s'." % (self.oozie_id,) else: res = "Submission for job '%s' (id %s, owner %s)." % (self.job.name, self.job.id, self.user) if self.oozie_id: res += " -- " + self.oozie_id return res def run(self): """ Take care of all the actions of submitting a Oozie workflow. Returns the oozie job id if all goes well. 
""" if self.oozie_id is not None: raise Exception(_("Submission already submitted (Oozie job id %s)") % (self.oozie_id,)) jobtracker = cluster.get_cluster_addr_for_job_submission() deployment_dir = self.deploy() try: prev = get_oozie().setuser(self.user.username) self._update_properties(jobtracker, deployment_dir) self.oozie_id = get_oozie().submit_job(self.properties) LOG.info("Submitted: %s" % (self,)) if self.job.get_type() == 'workflow': get_oozie().job_control(self.oozie_id, 'start') LOG.info("Started: %s" % (self,)) finally: get_oozie().setuser(prev) return self.oozie_id def rerun(self, deployment_dir, fail_nodes=None, skip_nodes=None): jobtracker = cluster.get_cluster_addr_for_job_submission() try: prev = get_oozie().setuser(self.user.username) self._update_properties(jobtracker, deployment_dir) self.properties.update({'oozie.wf.application.path': deployment_dir}) if fail_nodes: self.properties.update({'oozie.wf.rerun.failnodes': fail_nodes}) elif not skip_nodes: self.properties.update({'oozie.wf.rerun.failnodes': 'false'}) # Case empty 'skip_nodes' list else: self.properties.update({'oozie.wf.rerun.skip.nodes': skip_nodes}) get_oozie().rerun(self.oozie_id, properties=self.properties) LOG.info("Rerun: %s" % (self,)) finally: get_oozie().setuser(prev) return self.oozie_id def rerun_coord(self, deployment_dir, params): jobtracker = cluster.get_cluster_addr_for_job_submission() try: prev = get_oozie().setuser(self.user.username) self._update_properties(jobtracker, deployment_dir) self.properties.update({'oozie.coord.application.path': deployment_dir}) get_oozie().job_control(self.oozie_id, action='coord-rerun', properties=self.properties, parameters=params) LOG.info("Rerun: %s" % (self,)) finally: get_oozie().setuser(prev) return self.oozie_id def rerun_bundle(self, deployment_dir, params): jobtracker = cluster.get_cluster_addr_for_job_submission() try: prev = get_oozie().setuser(self.user.username) self._update_properties(jobtracker, deployment_dir) self.properties.update({'oozie.bundle.application.path': deployment_dir}) get_oozie().job_control(self.oozie_id, action='bundle-rerun', properties=self.properties, parameters=params) LOG.info("Rerun: %s" % (self,)) finally: get_oozie().setuser(prev) return self.oozie_id def deploy(self): try: deployment_dir = self._create_deployment_dir() except Exception, ex: msg = _("Failed to access deployment directory.") LOG.exception(msg) raise PopupException(message=msg, detail=str(ex)) oozie_xml = self.job.to_xml(self.properties) self._do_as(self.user.username , self._copy_files, deployment_dir, oozie_xml) if hasattr(self.job, 'actions'): for action in self.job.actions: # Make sure XML is there # Don't support shared sub-worfklow if action.node_type == 'subworkflow': node = action.get_full_node() sub_deploy = Submission(self.user, node.sub_workflow, self.fs, self.jt, self.properties) sub_deploy.deploy() return deployment_dir def _update_properties(self, jobtracker_addr, deployment_dir): if self.fs and self.jt: self.properties.update({ 'jobTracker': self.jt.logical_name or jobtracker_addr, 'nameNode': self.fs.logical_name or self.fs.fs_defaultfs, }) if self.job: self.properties.update({ self.job.get_application_path_key(): self.fs.get_hdfs_path(deployment_dir), self.job.HUE_ID: self.job.id }) def _create_deployment_dir(self): """ Return the job deployment directory in HDFS, creating it if necessary. 
The actual deployment dir should be 0711 owned by the user """ # Automatic setup of the required directories if needed create_directories(self.fs) if self.user != self.job.owner: path = Hdfs.join(REMOTE_DEPLOYMENT_DIR.get(), '_%s_-oozie-%s-%s' % (self.user.username, self.job.id, time.time())) self.fs.copy_remote_dir(self.job.deployment_dir, path, owner=self.user, dir_mode=0711) else: path = self.job.deployment_dir self._create_dir(path) return path def _create_dir(self, path, perms=0711): """ Return the directory in HDFS, creating it if necessary. """ try: statbuf = self.fs.stats(path) if not statbuf.isDir: msg = _("Path is not a directory: %s.") % (path,) LOG.error(msg) raise Exception(msg) except IOError, ex: if ex.errno != errno.ENOENT: msg = _("Error accessing directory '%s': %s.") % (path, ex) LOG.exception(msg) raise IOError(ex.errno, msg) if not self.fs.exists(path): self._do_as(self.user.username , self.fs.mkdir, path, perms) self._do_as(self.user.username , self.fs.chmod, path, perms) return path def _copy_files(self, deployment_dir, oozie_xml): """ Copy the files over to the deployment directory. This should run as the design owner. """ xml_path = self.fs.join(deployment_dir, self.job.get_application_filename()) self.fs.create(xml_path, overwrite=True, permission=0644, data=oozie_xml) LOG.debug("Created %s" % (xml_path,)) # Copy the files over files = [] if hasattr(self.job, 'node_list'): for node in self.job.node_list: if hasattr(node, 'jar_path') and node.jar_path.startswith('/'): files.append(node.jar_path) if files: lib_path = self.fs.join(deployment_dir, 'lib') if self.fs.exists(lib_path): LOG.debug("Cleaning up old %s" % (lib_path,)) self.fs.rmtree(lib_path) self.fs.mkdir(lib_path, 0755) LOG.debug("Created %s" % (lib_path,)) for file in files: self.fs.copyfile(file, self.fs.join(lib_path, self.fs.basename(file))) def _do_as(self, username, fn, *args, **kwargs): prev_user = self.fs.user try: self.fs.setuser(username) return fn(*args, **kwargs) finally: self.fs.setuser(prev_user) def remove_deployment_dir(self): """Delete the workflow deployment directory.""" try: path = self.job.deployment_dir if self._do_as(self.user.username , self.fs.exists, path): self._do_as(self.user.username , self.fs.rmtree, path) except Exception, ex: LOG.warn("Failed to clean up workflow deployment directory for " "%s (owner %s). Caused by: %s", self.job.name, self.user, ex) def create_directories(fs, directory_list=[]): # If needed, create the remote home, deployment and data directories directories = [REMOTE_DEPLOYMENT_DIR.get()] + directory_list for directory in directories: if not fs.do_as_user(fs.DEFAULT_USER, fs.exists, directory): remote_home_dir = Hdfs.join('/user', fs.DEFAULT_USER) if directory.startswith(remote_home_dir): # Home is 755 fs.do_as_user(fs.DEFAULT_USER, fs.create_home_dir, remote_home_dir) # Shared by all the users fs.do_as_user(fs.DEFAULT_USER, fs.mkdir, directory, 01777) fs.do_as_user(fs.DEFAULT_USER, fs.chmod, directory, 01777) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
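

# Usage sketch (illustrative, not part of Hue): how calling code might drive
# the Submission class above. The `user`, `workflow`, `fs` and `jt` arguments
# are assumed to come from the surrounding application (the request user, a
# saved workflow design and the HDFS/JobTracker clients), and
# 'oozie.use.system.libpath' is just an example job property.
def submit_workflow_example(user, workflow, fs, jt):
  """Deploy `workflow` to HDFS, submit it to Oozie and return the job id."""
  properties = {'oozie.use.system.libpath': 'true'}
  submission = Submission(user, job=workflow, fs=fs, jt=jt, properties=properties)
  oozie_id = submission.run()   # deploys the XML, then submits and starts the job
  LOG.info("Workflow submitted as %s" % oozie_id)
  return oozie_id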
# # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import optparse import platform import re import sys from error import NoSuchProjectError from error import InvalidProjectGroupsError class Command(object): """Base class for any command line action in repo. """ common = False manifest = None _optparse = None def WantPager(self, opt): return False def ReadEnvironmentOptions(self, opts): """ Set options from environment variables. """ env_options = self._RegisteredEnvironmentOptions() for env_key, opt_key in env_options.items(): # Get the user-set option value if any opt_value = getattr(opts, opt_key) # If the value is set, it means the user has passed it as a command # line option, and we should use that. Otherwise we can try to set it # with the value from the corresponding environment variable. if opt_value is not None: continue env_value = os.environ.get(env_key) if env_value is not None: setattr(opts, opt_key, env_value) return opts @property def OptionParser(self): if self._optparse is None: try: me = 'repo %s' % self.NAME usage = self.helpUsage.strip().replace('%prog', me) except AttributeError: usage = 'repo %s' % self.NAME self._optparse = optparse.OptionParser(usage = usage) self._Options(self._optparse) return self._optparse def _Options(self, p): """Initialize the option parser. """ def _RegisteredEnvironmentOptions(self): """Get options that can be set from environment variables. Return a dictionary mapping environment variable name to option key name that it can override. Example: {'REPO_MY_OPTION': 'my_option'} Will allow the option with key value 'my_option' to be set from the value in the environment variable named 'REPO_MY_OPTION'. Note: This does not work properly for options that are explicitly set to None by the user, or options that are defined with a default value other than None. """ return {} def Usage(self): """Display usage and terminate. """ self.OptionParser.print_usage() sys.exit(1) def Execute(self, opt, args): """Perform the action, after option parsing is complete. """ raise NotImplementedError def _ResetPathToProjectMap(self, projects): self._by_path = dict((p.worktree, p) for p in projects) def _UpdatePathToProjectMap(self, project): self._by_path[project.worktree] = project def _GetProjectByPath(self, path): project = None if os.path.exists(path): oldpath = None while path \ and path != oldpath \ and path != self.manifest.topdir: try: project = self._by_path[path] break except KeyError: oldpath = path path = os.path.dirname(path) else: try: project = self._by_path[path] except KeyError: pass return project def GetProjects(self, args, missing_ok=False, submodules_ok=False): """A list of projects that match the arguments. 
""" all_projects = self.manifest.projects result = [] mp = self.manifest.manifestProject groups = mp.config.GetString('manifest.groups') if not groups: groups = 'default,platform-' + platform.system().lower() groups = [x for x in re.split(r'[,\s]+', groups) if x] if not args: all_projects_list = list(all_projects.values()) derived_projects = {} for project in all_projects_list: if submodules_ok or project.sync_s: derived_projects.update((p.name, p) for p in project.GetDerivedSubprojects()) all_projects_list.extend(derived_projects.values()) for project in all_projects_list: if ((missing_ok or project.Exists) and project.MatchesGroups(groups)): result.append(project) else: self._ResetPathToProjectMap(all_projects.values()) for arg in args: project = all_projects.get(arg) if not project: path = os.path.abspath(arg).replace('\\', '/') project = self._GetProjectByPath(path) # If it's not a derived project, update path->project mapping and # search again, as arg might actually point to a derived subproject. if (project and not project.Derived and (submodules_ok or project.sync_s)): search_again = False for subproject in project.GetDerivedSubprojects(): self._UpdatePathToProjectMap(subproject) search_again = True if search_again: project = self._GetProjectByPath(path) or project if not project: raise NoSuchProjectError(arg) if not missing_ok and not project.Exists: raise NoSuchProjectError(arg) if not project.MatchesGroups(groups): raise InvalidProjectGroupsError(arg) result.append(project) def _getpath(x): return x.relpath result.sort(key=_getpath) return result def FindProjects(self, args): result = [] patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args] for project in self.GetProjects(''): for pattern in patterns: if pattern.search(project.name) or pattern.search(project.relpath): result.append(project) break result.sort(key=lambda project: project.relpath) return result # pylint: disable=W0223 # Pylint warns that the `InteractiveCommand` and `PagedCommand` classes do not # override method `Execute` which is abstract in `Command`. Since that method # is always implemented in classes derived from `InteractiveCommand` and # `PagedCommand`, this warning can be suppressed. class InteractiveCommand(Command): """Command which requires user interaction on the tty and must not run within a pager, even if the user asks to. """ def WantPager(self, opt): return False class PagedCommand(Command): """Command which defaults to output in a pager, as its display tends to be larger than one screen full. """ def WantPager(self, opt): return True # pylint: enable=W0223 class MirrorSafeCommand(object): """Command permits itself to run within a mirror, and does not require a working directory. """
# LayerMapping -- A Django Model/OGR Layer Mapping Utility """ The LayerMapping class provides a way to map the contents of OGR vector files (e.g. SHP files) to Geographic-enabled Django models. For more information, please consult the GeoDjango documentation: http://geodjango.org/docs/layermapping.html """ import sys from decimal import Decimal, InvalidOperation as DecimalInvalidOperation from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist from django.db import connections, router from django.contrib.gis.db.models import GeometryField from django.contrib.gis.gdal import (CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType, SpatialReference) from django.contrib.gis.gdal.field import ( OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime) from django.db import models, transaction from django.utils import six from django.utils.encoding import force_text # LayerMapping exceptions. class LayerMapError(Exception): pass class InvalidString(LayerMapError): pass class InvalidDecimal(LayerMapError): pass class InvalidInteger(LayerMapError): pass class MissingForeignKey(LayerMapError): pass class LayerMapping(object): "A class that maps OGR Layers to GeoDjango Models." # Acceptable 'base' types for a multi-geometry type. MULTI_TYPES = {1: OGRGeomType('MultiPoint'), 2: OGRGeomType('MultiLineString'), 3: OGRGeomType('MultiPolygon'), OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'), OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'), OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'), } # Acceptable Django field types and corresponding acceptable OGR # counterparts. FIELD_TYPES = { models.AutoField: OFTInteger, models.IntegerField: (OFTInteger, OFTReal, OFTString), models.FloatField: (OFTInteger, OFTReal), models.DateField: OFTDate, models.DateTimeField: OFTDateTime, models.EmailField: OFTString, models.TimeField: OFTTime, models.DecimalField: (OFTInteger, OFTReal), models.CharField: OFTString, models.SlugField: OFTString, models.TextField: OFTString, models.URLField: OFTString, models.BigIntegerField: (OFTInteger, OFTReal, OFTString), models.SmallIntegerField: (OFTInteger, OFTReal, OFTString), models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString), } def __init__(self, model, data, mapping, layer=0, source_srs=None, encoding='utf-8', transaction_mode='commit_on_success', transform=True, unique=None, using=None): """ A LayerMapping object is initialized using the given Model (not an instance), a DataSource (or string path to an OGR-supported data file), and a mapping dictionary. See the module level docstring for more details and keyword argument usage. """ # Getting the DataSource and the associated Layer. if isinstance(data, six.string_types): self.ds = DataSource(data, encoding=encoding) else: self.ds = data self.layer = self.ds[layer] self.using = using if using is not None else router.db_for_write(model) self.spatial_backend = connections[self.using].ops # Setting the mapping & model attributes. self.mapping = mapping self.model = model # Checking the layer -- initialization of the object will fail if # things don't check out before hand. self.check_layer() # Getting the geometry column associated with the model (an # exception will be raised if there is no geometry column). 
if connections[self.using].features.supports_transform: self.geo_field = self.geometry_field() else: transform = False # Checking the source spatial reference system, and getting # the coordinate transformation object (unless the `transform` # keyword is set to False) if transform: self.source_srs = self.check_srs(source_srs) self.transform = self.coord_transform() else: self.transform = transform # Setting the encoding for OFTString fields, if specified. if encoding: # Making sure the encoding exists, if not a LookupError # exception will be thrown. from codecs import lookup lookup(encoding) self.encoding = encoding else: self.encoding = None if unique: self.check_unique(unique) transaction_mode = 'autocommit' # Has to be set to autocommit. self.unique = unique else: self.unique = None # Setting the transaction decorator with the function in the # transaction modes dictionary. self.transaction_mode = transaction_mode if transaction_mode == 'autocommit': self.transaction_decorator = None elif transaction_mode == 'commit_on_success': self.transaction_decorator = transaction.atomic else: raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode) #### Checking routines used during initialization #### def check_fid_range(self, fid_range): "This checks the `fid_range` keyword." if fid_range: if isinstance(fid_range, (tuple, list)): return slice(*fid_range) elif isinstance(fid_range, slice): return fid_range else: raise TypeError else: return None def check_layer(self): """ This checks the Layer metadata, and ensures that it is compatible with the mapping information and model. Unlike previous revisions, there is no need to increment through each feature in the Layer. """ # The geometry field of the model is set here. # TODO: Support more than one geometry field / model. However, this # depends on the GDAL Driver in use. self.geom_field = False self.fields = {} # Getting lists of the field names and the field types available in # the OGR Layer. ogr_fields = self.layer.fields ogr_field_types = self.layer.field_types # Function for determining if the OGR mapping field is in the Layer. def check_ogr_fld(ogr_map_fld): try: idx = ogr_fields.index(ogr_map_fld) except ValueError: raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld) return idx # No need to increment through each feature in the model, simply check # the Layer metadata against what was given in the mapping dictionary. for field_name, ogr_name in self.mapping.items(): # Ensuring that a corresponding field exists in the model # for the given field name in the mapping. try: model_field = self.model._meta.get_field(field_name) except FieldDoesNotExist: raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name) # Getting the string name for the Django field class (e.g., 'PointField'). fld_name = model_field.__class__.__name__ if isinstance(model_field, GeometryField): if self.geom_field: raise LayerMapError('LayerMapping does not support more than one GeometryField per model.') # Getting the coordinate dimension of the geometry field. coord_dim = model_field.dim try: if coord_dim == 3: gtype = OGRGeomType(ogr_name + '25D') else: gtype = OGRGeomType(ogr_name) except GDALException: raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name) # Making sure that the OGR Layer's Geometry is compatible. 
ltype = self.layer.geom_type if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)): raise LayerMapError('Invalid mapping geometry; model has %s%s, ' 'layer geometry type is %s.' % (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype)) # Setting the `geom_field` attribute w/the name of the model field # that is a Geometry. Also setting the coordinate dimension # attribute. self.geom_field = field_name self.coord_dim = coord_dim fields_val = model_field elif isinstance(model_field, models.ForeignKey): if isinstance(ogr_name, dict): # Is every given related model mapping field in the Layer? rel_model = model_field.rel.to for rel_name, ogr_field in ogr_name.items(): idx = check_ogr_fld(ogr_field) try: rel_model._meta.get_field(rel_name) except FieldDoesNotExist: raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' % (rel_name, rel_model.__class__.__name__)) fields_val = rel_model else: raise TypeError('ForeignKey mapping must be of dictionary type.') else: # Is the model field type supported by LayerMapping? if model_field.__class__ not in self.FIELD_TYPES: raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name) # Is the OGR field in the Layer? idx = check_ogr_fld(ogr_name) ogr_field = ogr_field_types[idx] # Can the OGR field type be mapped to the Django field type? if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]): raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' % (ogr_field, ogr_field.__name__, fld_name)) fields_val = model_field self.fields[field_name] = fields_val def check_srs(self, source_srs): "Checks the compatibility of the given spatial reference object." if isinstance(source_srs, SpatialReference): sr = source_srs elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()): sr = source_srs.srs elif isinstance(source_srs, (int, six.string_types)): sr = SpatialReference(source_srs) else: # Otherwise just pulling the SpatialReference from the layer sr = self.layer.srs if not sr: raise LayerMapError('No source reference system defined.') else: return sr def check_unique(self, unique): "Checks the `unique` keyword parameter -- may be a sequence or string." if isinstance(unique, (list, tuple)): # List of fields to determine uniqueness with for attr in unique: if attr not in self.mapping: raise ValueError elif isinstance(unique, six.string_types): # Only a single field passed in. if unique not in self.mapping: raise ValueError else: raise TypeError('Unique keyword argument must be set with a tuple, list, or string.') # Keyword argument retrieval routines #### def feature_kwargs(self, feat): """ Given an OGR Feature, this will return a dictionary of keyword arguments for constructing the mapped model. """ # The keyword arguments for model construction. kwargs = {} # Incrementing through each model field and OGR field in the # dictionary mapping. for field_name, ogr_name in self.mapping.items(): model_field = self.fields[field_name] if isinstance(model_field, GeometryField): # Verify OGR geometry. try: val = self.verify_geom(feat.geom, model_field) except GDALException: raise LayerMapError('Could not retrieve geometry from feature.') elif isinstance(model_field, models.base.ModelBase): # The related _model_, not a field was passed in -- indicating # another mapping for the related Model. val = self.verify_fk(feat, model_field, ogr_name) else: # Otherwise, verify OGR Field type. 
val = self.verify_ogr_field(feat[ogr_name], model_field) # Setting the keyword arguments for the field name with the # value obtained above. kwargs[field_name] = val return kwargs def unique_kwargs(self, kwargs): """ Given the feature keyword arguments (from `feature_kwargs`) this routine will construct and return the uniqueness keyword arguments -- a subset of the feature kwargs. """ if isinstance(self.unique, six.string_types): return {self.unique: kwargs[self.unique]} else: return {fld: kwargs[fld] for fld in self.unique} #### Verification routines used in constructing model keyword arguments. #### def verify_ogr_field(self, ogr_field, model_field): """ Verifies if the OGR Field contents are acceptable to the Django model field. If they are, the verified value is returned, otherwise the proper exception is raised. """ if (isinstance(ogr_field, OFTString) and isinstance(model_field, (models.CharField, models.TextField))): if self.encoding: # The encoding for OGR data sources may be specified here # (e.g., 'cp437' for Census Bureau boundary files). val = force_text(ogr_field.value, self.encoding) else: val = ogr_field.value if model_field.max_length and len(val) > model_field.max_length: raise InvalidString('%s model field maximum string length is %s, given %s characters.' % (model_field.name, model_field.max_length, len(val))) elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField): try: # Creating an instance of the Decimal value to use. d = Decimal(str(ogr_field.value)) except DecimalInvalidOperation: raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value) # Getting the decimal value as a tuple. dtup = d.as_tuple() digits = dtup[1] d_idx = dtup[2] # index where the decimal is # Maximum amount of precision, or digits to the left of the decimal. max_prec = model_field.max_digits - model_field.decimal_places # Getting the digits to the left of the decimal place for the # given decimal. if d_idx < 0: n_prec = len(digits[:d_idx]) else: n_prec = len(digits) + d_idx # If we have more than the maximum digits allowed, then throw an # InvalidDecimal exception. if n_prec > max_prec: raise InvalidDecimal( 'A DecimalField with max_digits %d, decimal_places %d must ' 'round to an absolute value less than 10^%d.' % (model_field.max_digits, model_field.decimal_places, max_prec) ) val = d elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField): # Attempt to convert any OFTReal and OFTString value to an OFTInteger. try: val = int(ogr_field.value) except ValueError: raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value) else: val = ogr_field.value return val def verify_fk(self, feat, rel_model, rel_mapping): """ Given an OGR Feature, the related model and its dictionary mapping, this routine will retrieve the related model for the ForeignKey mapping. """ # TODO: It is expensive to retrieve a model for every record -- # explore if an efficient mechanism exists for caching related # ForeignKey models. # Constructing and verifying the related model keyword arguments. fk_kwargs = {} for field_name, ogr_name in rel_mapping.items(): fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name)) # Attempting to retrieve and return the related model. 
try: return rel_model.objects.using(self.using).get(**fk_kwargs) except ObjectDoesNotExist: raise MissingForeignKey( 'No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs) ) def verify_geom(self, geom, model_field): """ Verifies the geometry -- will construct and return a GeometryCollection if necessary (for example if the model field is MultiPolygonField while the mapped shapefile only contains Polygons). """ # Downgrade a 3D geom to a 2D one, if necessary. if self.coord_dim != geom.coord_dim: geom.coord_dim = self.coord_dim if self.make_multi(geom.geom_type, model_field): # Constructing a multi-geometry type to contain the single geometry multi_type = self.MULTI_TYPES[geom.geom_type.num] g = OGRGeometry(multi_type) g.add(geom) else: g = geom # Transforming the geometry with our Coordinate Transformation object, # but only if the class variable `transform` is set w/a CoordTransform # object. if self.transform: g.transform(self.transform) # Returning the WKT of the geometry. return g.wkt #### Other model methods #### def coord_transform(self): "Returns the coordinate transformation object." SpatialRefSys = self.spatial_backend.spatial_ref_sys() try: # Getting the target spatial reference system target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs # Creating the CoordTransform object return CoordTransform(self.source_srs, target_srs) except Exception as msg: new_msg = 'Could not translate between the data source and model geometry: %s' % msg six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2]) def geometry_field(self): "Returns the GeometryField instance associated with the geographic column." # Use `get_field()` on the model's options so that we # get the correct field instance if there's model inheritance. opts = self.model._meta return opts.get_field(self.geom_field) def make_multi(self, geom_type, model_field): """ Given the OGRGeomType for a geometry and its associated GeometryField, determine whether the geometry should be turned into a GeometryCollection. """ return (geom_type.num in self.MULTI_TYPES and model_field.__class__.__name__ == 'Multi%s' % geom_type.django) def save(self, verbose=False, fid_range=False, step=False, progress=False, silent=False, stream=sys.stdout, strict=False): """ Saves the contents from the OGR DataSource Layer into the database according to the mapping dictionary given at initialization. Keyword Parameters: verbose: If set, information will be printed subsequent to each model save executed on the database. fid_range: May be set with a slice or tuple of (begin, end) feature ID's to map from the data source. In other words, this keyword enables the user to selectively import a subset range of features in the geographic data source. step: If set with an integer, transactions will occur at every step interval. For example, if step=1000, a commit would occur after the 1,000th feature, the 2,000th feature etc. progress: When this keyword is set, status information will be printed giving the number of features processed and successfully saved. By default, progress information will pe printed every 1000 features processed, however, this default may be overridden by setting this keyword with an integer for the desired interval. stream: Status information will be written to this file handle. Defaults to using `sys.stdout`, but any object with a `write` method is supported. 
silent: By default, non-fatal error notifications are printed to stdout, but this keyword may be set to disable these notifications. strict: Execution of the model mapping will cease upon the first error encountered. The default behavior is to attempt to continue. """ # Getting the default Feature ID range. default_range = self.check_fid_range(fid_range) # Setting the progress interval, if requested. if progress: if progress is True or not isinstance(progress, int): progress_interval = 1000 else: progress_interval = progress def _save(feat_range=default_range, num_feat=0, num_saved=0): if feat_range: layer_iter = self.layer[feat_range] else: layer_iter = self.layer for feat in layer_iter: num_feat += 1 # Getting the keyword arguments try: kwargs = self.feature_kwargs(feat) except LayerMapError as msg: # Something borked the validation if strict: raise elif not silent: stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg)) else: # Constructing the model using the keyword args is_update = False if self.unique: # If we want unique models on a particular field, handle the # geometry appropriately. try: # Getting the keyword arguments and retrieving # the unique model. u_kwargs = self.unique_kwargs(kwargs) m = self.model.objects.using(self.using).get(**u_kwargs) is_update = True # Getting the geometry (in OGR form), creating # one from the kwargs WKT, adding in additional # geometries, and update the attribute with the # just-updated geometry WKT. geom = getattr(m, self.geom_field).ogr new = OGRGeometry(kwargs[self.geom_field]) for g in new: geom.add(g) setattr(m, self.geom_field, geom.wkt) except ObjectDoesNotExist: # No unique model exists yet, create. m = self.model(**kwargs) else: m = self.model(**kwargs) try: # Attempting to save. m.save(using=self.using) num_saved += 1 if verbose: stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m)) except Exception as msg: if strict: # Bailing out if the `strict` keyword is set. if not silent: stream.write( 'Failed to save the feature (id: %s) into the ' 'model with the keyword arguments:\n' % feat.fid ) stream.write('%s\n' % kwargs) raise elif not silent: stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg)) # Printing progress information, if requested. if progress and num_feat % progress_interval == 0: stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved)) # Only used for status output purposes -- incremental saving uses the # values returned here. return num_saved, num_feat if self.transaction_decorator is not None: _save = self.transaction_decorator(_save) nfeat = self.layer.num_feat if step and isinstance(step, int) and step < nfeat: # Incremental saving is requested at the given interval (step) if default_range: raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.') beg, num_feat, num_saved = (0, 0, 0) indices = range(step, nfeat, step) n_i = len(indices) for i, end in enumerate(indices): # Constructing the slice to use for this step; the last slice is # special (e.g, [100:] instead of [90:100]). if i + 1 == n_i: step_slice = slice(beg, None) else: step_slice = slice(beg, end) try: num_feat, num_saved = _save(step_slice, num_feat, num_saved) beg = end except: # Deliberately catch everything stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice)) raise else: # Otherwise, just calling the previously defined _save() function. _save()
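

# Usage sketch (illustrative, not part of this module): typical use of
# LayerMapping from project code. The `City` model, the shapefile path and the
# mapping dictionary are hypothetical; mapping keys are model field names and
# values are OGR field names ('POINT', 'POLYGON', ... for the geometry field).
def _example_layer_mapping():
    from geoapp.models import City  # hypothetical app and model

    city_mapping = {
        'name': 'NAME',        # OGR string field  -> CharField
        'population': 'POP',   # OGR integer field -> IntegerField
        'geom': 'POINT',       # OGR geometry      -> PointField
    }

    lm = LayerMapping(City, '/data/cities.shp', city_mapping,
                      transform=True, encoding='utf-8')
    # strict=True aborts on the first bad feature; verbose=True reports each save.
    lm.save(strict=True, verbose=True)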
# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from itertools import chain import logging try: from greplin import scales except ImportError: raise ImportError( "The scales library is required for metrics support: " "https://pypi.python.org/pypi/scales") log = logging.getLogger(__name__) class Metrics(object): """ A collection of timers and counters for various performance metrics. Timer metrics are represented as floating point seconds. """ request_timer = None """ A :class:`greplin.scales.PmfStat` timer for requests. This is a dict-like object with the following keys: * count - number of requests that have been timed * min - min latency * max - max latency * mean - mean latency * stddev - standard deviation for latencies * median - median latency * 75percentile - 75th percentile latencies * 95percentile - 95th percentile latencies * 98percentile - 98th percentile latencies * 99percentile - 99th percentile latencies * 999percentile - 99.9th percentile latencies """ connection_errors = None """ A :class:`greplin.scales.IntStat` count of the number of times that a request to a Cassandra node has failed due to a connection problem. """ write_timeouts = None """ A :class:`greplin.scales.IntStat` count of write requests that resulted in a timeout. """ read_timeouts = None """ A :class:`greplin.scales.IntStat` count of read requests that resulted in a timeout. """ unavailables = None """ A :class:`greplin.scales.IntStat` count of write or read requests that failed due to an insufficient number of replicas being alive to meet the requested :class:`.ConsistencyLevel`. """ other_errors = None """ A :class:`greplin.scales.IntStat` count of all other request failures, including failures caused by invalid requests, bootstrapping nodes, overloaded nodes, etc. """ retries = None """ A :class:`greplin.scales.IntStat` count of the number of times a request was retried based on the :class:`.RetryPolicy` decision. """ ignores = None """ A :class:`greplin.scales.IntStat` count of the number of times a failed request was ignored based on the :class:`.RetryPolicy` decision. """ known_hosts = None """ A :class:`greplin.scales.IntStat` count of the number of nodes in the cluster that the driver is aware of, regardless of whether any connections are opened to those nodes. """ connected_to = None """ A :class:`greplin.scales.IntStat` count of the number of nodes that the driver currently has at least one connection open to. """ open_connections = None """ A :class:`greplin.scales.IntStat` count of the number connections the driver currently has open. 
""" _stats_counter = 0 def __init__(self, cluster_proxy): log.debug("Starting metric capture") self.stats_name = 'cassandra-{0}'.format(str(self._stats_counter)) Metrics._stats_counter += 1 self.stats = scales.collection(self.stats_name, scales.PmfStat('request_timer'), scales.IntStat('connection_errors'), scales.IntStat('write_timeouts'), scales.IntStat('read_timeouts'), scales.IntStat('unavailables'), scales.IntStat('other_errors'), scales.IntStat('retries'), scales.IntStat('ignores'), # gauges scales.Stat('known_hosts', lambda: len(cluster_proxy.metadata.all_hosts())), scales.Stat('connected_to', lambda: len(set(chain.from_iterable(s._pools.keys() for s in cluster_proxy.sessions)))), scales.Stat('open_connections', lambda: sum(sum(p.open_count for p in s._pools.values()) for s in cluster_proxy.sessions))) # TODO, to be removed in 4.0 # /cassandra contains the metrics of the first cluster registered if 'cassandra' not in scales._Stats.stats: scales._Stats.stats['cassandra'] = scales._Stats.stats[self.stats_name] self.request_timer = self.stats.request_timer self.connection_errors = self.stats.connection_errors self.write_timeouts = self.stats.write_timeouts self.read_timeouts = self.stats.read_timeouts self.unavailables = self.stats.unavailables self.other_errors = self.stats.other_errors self.retries = self.stats.retries self.ignores = self.stats.ignores self.known_hosts = self.stats.known_hosts self.connected_to = self.stats.connected_to self.open_connections = self.stats.open_connections def on_connection_error(self): self.stats.connection_errors += 1 def on_write_timeout(self): self.stats.write_timeouts += 1 def on_read_timeout(self): self.stats.read_timeouts += 1 def on_unavailable(self): self.stats.unavailables += 1 def on_other_error(self): self.stats.other_errors += 1 def on_ignore(self): self.stats.ignores += 1 def on_retry(self): self.stats.retries += 1 def get_stats(self): """ Returns the metrics for the registered cluster instance. """ return scales.getStats()[self.stats_name] def set_stats_name(self, stats_name): """ Set the metrics stats name. The stats_name is a string used to access the metris through scales: scales.getStats()[<stats_name>] Default is 'cassandra-<num>'. """ if self.stats_name == stats_name: return if stats_name in scales._Stats.stats: raise ValueError('"{0}" already exists in stats.'.format(stats_name)) stats = scales._Stats.stats[self.stats_name] del scales._Stats.stats[self.stats_name] self.stats_name = stats_name scales._Stats.stats[self.stats_name] = stats
# orm/path_registry.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Path tracking utilities, representing mapper graph traversals. """ from .. import inspection from .. import util from itertools import chain from .base import class_mapper def _unreduce_path(path): return PathRegistry.deserialize(path) class PathRegistry(object): """Represent query load paths and registry functions. Basically represents structures like: (<User mapper>, "orders", <Order mapper>, "items", <Item mapper>) These structures are generated by things like query options (joinedload(), subqueryload(), etc.) and are used to compose keys stored in the query._attributes dictionary for various options. They are then re-composed at query compile/result row time as the query is formed and as rows are fetched, where they again serve to compose keys to look up options in the context.attributes dictionary, which is copied from query._attributes. The path structure has a limited amount of caching, where each "root" ultimately pulls from a fixed registry associated with the first mapper, that also contains elements for each of its property keys. However paths longer than two elements, which are the exception rather than the rule, are generated on an as-needed basis. """ def __eq__(self, other): return other is not None and \ self.path == other.path def set(self, attributes, key, value): attributes[(key, self.path)] = value def setdefault(self, attributes, key, value): attributes.setdefault((key, self.path), value) def get(self, attributes, key, value=None): key = (key, self.path) if key in attributes: return attributes[key] else: return value def __len__(self): return len(self.path) @property def length(self): return len(self.path) def pairs(self): path = self.path for i in range(0, len(path), 2): yield path[i], path[i + 1] def contains_mapper(self, mapper): for path_mapper in [ self.path[i] for i in range(0, len(self.path), 2) ]: if path_mapper.is_mapper and \ path_mapper.isa(mapper): return True else: return False def contains(self, attributes, key): return (key, self.path) in attributes def __reduce__(self): return _unreduce_path, (self.serialize(), ) def serialize(self): path = self.path return list(zip( [m.class_ for m in [path[i] for i in range(0, len(path), 2)]], [path[i].key for i in range(1, len(path), 2)] + [None] )) @classmethod def deserialize(cls, path): if path is None: return None p = tuple(chain(*[(class_mapper(mcls), class_mapper(mcls).attrs[key] if key is not None else None) for mcls, key in path])) if p and p[-1] is None: p = p[0:-1] return cls.coerce(p) @classmethod def per_mapper(cls, mapper): return EntityRegistry( cls.root, mapper ) @classmethod def coerce(cls, raw): return util.reduce(lambda prev, next: prev[next], raw, cls.root) @classmethod def token(cls, token): return TokenRegistry(cls.root, token) def __add__(self, other): return util.reduce( lambda prev, next: prev[next], other.path, self) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.path, ) class RootRegistry(PathRegistry): """Root registry, defers to mappers so that paths are maintained per-root-mapper. 
""" path = () def __getitem__(self, entity): return entity._path_registry PathRegistry.root = RootRegistry() class TokenRegistry(PathRegistry): def __init__(self, parent, token): self.token = token self.parent = parent self.path = parent.path + (token,) def __getitem__(self, entity): raise NotImplementedError() class PropRegistry(PathRegistry): def __init__(self, parent, prop): # restate this path in terms of the # given MapperProperty's parent. insp = inspection.inspect(parent[-1]) if not insp.is_aliased_class or insp._use_mapper_path: parent = parent.parent[prop.parent] elif insp.is_aliased_class and insp.with_polymorphic_mappers: if prop.parent is not insp.mapper and \ prop.parent in insp.with_polymorphic_mappers: subclass_entity = parent[-1]._entity_for_mapper(prop.parent) parent = parent.parent[subclass_entity] self.prop = prop self.parent = parent self.path = parent.path + (prop,) def __getitem__(self, entity): if isinstance(entity, (int, slice)): return self.path[entity] else: return EntityRegistry( self, entity ) class EntityRegistry(PathRegistry, dict): is_aliased_class = False def __init__(self, parent, entity): self.key = entity self.parent = parent self.is_aliased_class = entity.is_aliased_class self.path = parent.path + (entity,) def __bool__(self): return True __nonzero__ = __bool__ def __getitem__(self, entity): if isinstance(entity, (int, slice)): return self.path[entity] else: return dict.__getitem__(self, entity) def _inlined_get_for(self, prop, context, key): """an inlined version of: cls = path[mapperproperty].get(context, key) Skips the isinstance() check in __getitem__ and the extra method call for get(). Used by StrategizedProperty for its very frequent lookup. """ path = dict.__getitem__(self, prop) path_key = (key, path.path) if path_key in context.attributes: return context.attributes[path_key] else: return None def __missing__(self, key): self[key] = item = PropRegistry(self, key) return item
""" Test the decorators from ``testing.decorators``. """ from __future__ import division, absolute_import, print_function import warnings import pytest from numpy.testing import ( assert_, assert_raises, dec, SkipTest, KnownFailureException, ) try: import nose except ImportError: HAVE_NOSE = False else: HAVE_NOSE = True @pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose") class TestNoseDecorators(object): # These tests are run in a class for simplicity while still # getting a report on each, skipped or success. class DidntSkipException(Exception): pass def test_slow(self): @dec.slow def slow_func(x, y, z): pass assert_(slow_func.slow) def test_setastest(self): @dec.setastest() def f_default(a): pass @dec.setastest(True) def f_istest(a): pass @dec.setastest(False) def f_isnottest(a): pass assert_(f_default.__test__) assert_(f_istest.__test__) assert_(not f_isnottest.__test__) def test_skip_functions_hardcoded(self): @dec.skipif(True) def f1(x): raise self.DidntSkipException try: f1('a') except self.DidntSkipException: raise Exception('Failed to skip') except SkipTest().__class__: pass @dec.skipif(False) def f2(x): raise self.DidntSkipException try: f2('a') except self.DidntSkipException: pass except SkipTest().__class__: raise Exception('Skipped when not expected to') def test_skip_functions_callable(self): def skip_tester(): return skip_flag == 'skip me!' @dec.skipif(skip_tester) def f1(x): raise self.DidntSkipException try: skip_flag = 'skip me!' f1('a') except self.DidntSkipException: raise Exception('Failed to skip') except SkipTest().__class__: pass @dec.skipif(skip_tester) def f2(x): raise self.DidntSkipException try: skip_flag = 'five is right out!' f2('a') except self.DidntSkipException: pass except SkipTest().__class__: raise Exception('Skipped when not expected to') def test_skip_generators_hardcoded(self): @dec.knownfailureif(True, "This test is known to fail") def g1(x): for i in range(x): yield i try: for j in g1(10): pass except KnownFailureException().__class__: pass else: raise Exception('Failed to mark as known failure') @dec.knownfailureif(False, "This test is NOT known to fail") def g2(x): for i in range(x): yield i raise self.DidntSkipException('FAIL') try: for j in g2(10): pass except KnownFailureException().__class__: raise Exception('Marked incorrectly as known failure') except self.DidntSkipException: pass def test_skip_generators_callable(self): def skip_tester(): return skip_flag == 'skip me!' @dec.knownfailureif(skip_tester, "This test is known to fail") def g1(x): for i in range(x): yield i try: skip_flag = 'skip me!' 
for j in g1(10): pass except KnownFailureException().__class__: pass else: raise Exception('Failed to mark as known failure') @dec.knownfailureif(skip_tester, "This test is NOT known to fail") def g2(x): for i in range(x): yield i raise self.DidntSkipException('FAIL') try: skip_flag = 'do not skip' for j in g2(10): pass except KnownFailureException().__class__: raise Exception('Marked incorrectly as known failure') except self.DidntSkipException: pass def test_deprecated(self): @dec.deprecated(True) def non_deprecated_func(): pass @dec.deprecated() def deprecated_func(): import warnings warnings.warn("TEST: deprecated func", DeprecationWarning) @dec.deprecated() def deprecated_func2(): import warnings warnings.warn("AHHHH") raise ValueError @dec.deprecated() def deprecated_func3(): import warnings warnings.warn("AHHHH") # marked as deprecated, but does not raise DeprecationWarning assert_raises(AssertionError, non_deprecated_func) # should be silent deprecated_func() with warnings.catch_warnings(record=True): warnings.simplefilter("always") # do not propagate unrelated warnings # fails if deprecated decorator just disables test. See #1453. assert_raises(ValueError, deprecated_func2) # warning is not a DeprecationWarning assert_raises(AssertionError, deprecated_func3) def test_parametrize(self): # dec.parametrize assumes that it is being run by nose. Because # we are running under pytest, we need to explicitly check the # results. @dec.parametrize('base, power, expected', [(1, 1, 1), (2, 1, 2), (2, 2, 4)]) def check_parametrize(base, power, expected): assert_(base**power == expected) count = 0 for test in check_parametrize(): test[0](*test[1:]) count += 1 assert_(count == 3)
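# Minimal sketch of the nose decorators exercised by the tests above, assuming
# nose is installed; HAVE_FEATURE is a hypothetical flag used only for
# illustration.
import sys
from numpy.testing import assert_, dec

HAVE_FEATURE = sys.version_info[0] >= 3   # hypothetical condition

@dec.slow
def test_expensive_path():
    assert_(sum(range(10 ** 6)) >= 0)

@dec.skipif(not HAVE_FEATURE, "feature not available on this interpreter")
def test_needs_feature():
    assert_(True)

@dec.knownfailureif(True, "documented bug; failure is expected")
def test_known_bug():
    raise AssertionError("expected failure")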
""" Nose test running. This module implements ``test()`` and ``bench()`` functions for NumPy modules. """ from __future__ import division, absolute_import, print_function import os import sys import warnings import numpy as np from numpy.compat import basestring from .utils import import_nose, suppress_warnings def get_package_name(filepath): """ Given a path where a package is installed, determine its name. Parameters ---------- filepath : str Path to a file. If the determination fails, "numpy" is returned. Examples -------- >>> np.testing.nosetester.get_package_name('nonsense') 'numpy' """ fullpath = filepath[:] pkg_name = [] while 'site-packages' in filepath or 'dist-packages' in filepath: filepath, p2 = os.path.split(filepath) if p2 in ('site-packages', 'dist-packages'): break pkg_name.append(p2) # if package name determination failed, just default to numpy/scipy if not pkg_name: if 'scipy' in fullpath: return 'scipy' else: return 'numpy' # otherwise, reverse to get correct order and return pkg_name.reverse() # don't include the outer egg directory if pkg_name[0].endswith('.egg'): pkg_name.pop(0) return '.'.join(pkg_name) def run_module_suite(file_to_run=None, argv=None): """ Run a test module. Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from the command line Parameters ---------- file_to_run : str, optional Path to test module, or None. By default, run the module from which this function is called. argv : list of strings Arguments to be passed to the nose test runner. ``argv[0]`` is ignored. All command line arguments accepted by ``nosetests`` will work. If it is the default value None, sys.argv is used. .. versionadded:: 1.9.0 Examples -------- Adding the following:: if __name__ == "__main__" : run_module_suite(argv=sys.argv) at the end of a test module will run the tests when that module is called in the python interpreter. Alternatively, calling:: >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") from an interpreter will run all the test routine in 'test_matlib.py'. """ if file_to_run is None: f = sys._getframe(1) file_to_run = f.f_locals.get('__file__', None) if file_to_run is None: raise AssertionError if argv is None: argv = sys.argv + [file_to_run] else: argv = argv + [file_to_run] nose = import_nose() from .noseclasses import KnownFailurePlugin nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) class NoseTester(object): """ Nose test runner. This class is made available as numpy.testing.Tester, and a test function is typically added to a package's __init__.py like so:: from numpy.testing import Tester test = Tester().test Calling this test function finds and runs all tests associated with the package and all its sub-packages. Attributes ---------- package_path : str Full path to the package to test. package_name : str Name of the package to test. Parameters ---------- package : module, str or None, optional The package to test. If a string, this should be the full path to the package. If None (default), `package` is set to the module from which `NoseTester` is initialized. raise_warnings : None, str or sequence of warnings, optional This specifies which warnings to configure as 'raise' instead of being shown once during the test execution. Valid strings are: - "develop" : equals ``(Warning,)`` - "release" : equals ``()``, don't raise on any warnings. Default is "release". depth : int, optional If `package` is None, then this can be used to initialize from the module of the caller of (the caller of (...)) the code that initializes `NoseTester`. 
Default of 0 means the module of the immediate caller; higher values are useful for utility routines that want to initialize `NoseTester` objects on behalf of other code. """ def __init__(self, package=None, raise_warnings="release", depth=0): # Back-compat: 'None' used to mean either "release" or "develop" # depending on whether this was a release or develop version of # numpy. Those semantics were fine for testing numpy, but not so # helpful for downstream 01-codes like scipy that use # numpy.testing. (They want to set this based on whether *they* are a # release or develop version, not whether numpy is.) So we continue to # accept 'None' for back-compat, but it's now just an alias for the # default "release". if raise_warnings is None: raise_warnings = "release" package_name = None if package is None: f = sys._getframe(1 + depth) package_path = f.f_locals.get('__file__', None) if package_path is None: raise AssertionError package_path = os.path.dirname(package_path) package_name = f.f_locals.get('__name__', None) elif isinstance(package, type(os)): package_path = os.path.dirname(package.__file__) package_name = getattr(package, '__name__', None) else: package_path = str(package) self.package_path = package_path # Find the package name under test; this name is used to limit coverage # reporting (if enabled). if package_name is None: package_name = get_package_name(package_path) self.package_name = package_name # Set to "release" in constructor in maintenance branches. self.raise_warnings = raise_warnings def _test_argv(self, label, verbose, extra_argv): ''' Generate argv for nosetest command Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional see ``test`` docstring verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. Returns ------- argv : list command line arguments that will be passed to nose ''' argv = [__file__, self.package_path, '-s'] if label and label != 'full': if not isinstance(label, basestring): raise TypeError('Selection label should be a string') if label == 'fast': label = 'not slow' argv += ['-A', label] argv += ['--verbosity', str(verbose)] # When installing with setuptools, and also in some other cases, the # test_*.py files end up marked +x executable. Nose, by default, does # not run files marked with +x as they might be scripts. However, in # our case nose only looks for test_*.py files under the package # directory, which should be safe. 
argv += ['--exe'] if extra_argv: argv += extra_argv return argv def _show_system_info(self): nose = import_nose() import numpy print("NumPy version %s" % numpy.__version__) relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous print("NumPy relaxed strides checking option:", relaxed_strides) npdir = os.path.dirname(numpy.__file__) print("NumPy is installed in %s" % npdir) if 'scipy' in self.package_name: import scipy print("SciPy version %s" % scipy.__version__) spdir = os.path.dirname(scipy.__file__) print("SciPy is installed in %s" % spdir) pyversion = sys.version.replace('\n', '') print("Python version %s" % pyversion) print("nose version %d.%d.%d" % nose.__versioninfo__) def _get_custom_doctester(self): """ Return instantiated plugin for doctests Allows subclassing of this class to override doctester A return value of None means use the nose builtin doctest plugin """ from .noseclasses import NumpyDoctest return NumpyDoctest() def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False): """ Run tests for module using nose. This method does the heavy lifting for the `test` method. It takes all the same arguments, for details see `test`. See Also -------- test """ # fail with nice error message if nose is not present import_nose() # compile argv argv = self._test_argv(label, verbose, extra_argv) # our way of doing coverage if coverage: argv += ['--cover-package=%s' % self.package_name, '--with-coverage', '--cover-tests', '--cover-erase'] # construct list of plugins import nose.plugins.builtin from .noseclasses import KnownFailurePlugin, Unplugger plugins = [KnownFailurePlugin()] plugins += [p() for p in nose.plugins.builtin.plugins] # add doctesting if required doctest_argv = '--with-doctest' in argv if doctests == False and doctest_argv: doctests = True plug = self._get_custom_doctester() if plug is None: # use standard doctesting if doctests and not doctest_argv: argv += ['--with-doctest'] else: # custom doctesting if doctest_argv: # in fact the unplugger would take care of this argv.remove('--with-doctest') plugins += [Unplugger('doctest'), plug] if doctests: argv += ['--with-' + plug.name] return argv, plugins def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False, raise_warnings=None): """ Run tests for module using nose. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the tests to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow tests as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. attribute_identifier - string passed directly to nosetests as '-A'. verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. doctests : bool, optional If True, run doctests in module. Default is False. coverage : bool, optional If True, report coverage of NumPy code. Default is False. (This requires the `coverage module: <http://nedbatchelder.com/code/modules/coverage.html>`_). raise_warnings : None, str or sequence of warnings, optional This specifies which warnings to configure as 'raise' instead of being shown once during the test execution. 
Valid strings are: - "develop" : equals ``(Warning,)`` - "release" : equals ``()``, don't raise on any warnings. The default is to use the class initialization value. Returns ------- result : object Returns the result of running the tests as a ``nose.result.TextTestResult`` object. Notes ----- Each NumPy module exposes `test` in its namespace to run all tests for it. For example, to run all tests for numpy.lib: >>> np.lib.test() #doctest: +SKIP Examples -------- >>> result = np.lib.test() #doctest: +SKIP Running unit tests for numpy.lib ... Ran 976 tests in 3.933s OK >>> result.errors #doctest: +SKIP [] >>> result.knownfail #doctest: +SKIP [] """ # cap verbosity at 3 because nose becomes *very* verbose beyond that verbose = min(verbose, 3) from . import utils utils.verbose = verbose if doctests: print("Running unit tests and doctests for %s" % self.package_name) else: print("Running unit tests for %s" % self.package_name) self._show_system_info() # reset doctest state on every run import doctest doctest.master = None if raise_warnings is None: raise_warnings = self.raise_warnings _warn_opts = dict(develop=(Warning,), release=()) if isinstance(raise_warnings, basestring): raise_warnings = _warn_opts[raise_warnings] with suppress_warnings("location") as sup: # Reset the warning filters to the default state, # so that running the tests is more repeatable. warnings.resetwarnings() # Set all warnings to 'warn', this is because the default 'once' # has the bad property of possibly shadowing later warnings. warnings.filterwarnings('always') # Force the requested warnings to raise for warningtype in raise_warnings: warnings.filterwarnings('error', category=warningtype) # Filter out annoying import messages. sup.filter(message='Not importing directory') sup.filter(message="numpy.dtype size changed") sup.filter(message="numpy.ufunc size changed") sup.filter(category=np.ModuleDeprecationWarning) # Filter out boolean '-' deprecation messages. This allows # older versions of scipy to test without a flood of messages. sup.filter(message=".*boolean negative.*") sup.filter(message=".*boolean subtract.*") # Filter out distutils cpu warnings (could be localized to # distutils tests). ASV has problems with top level import, # so fetch module for suppression here. with warnings.catch_warnings(): warnings.simplefilter("always") from ..distutils import cpuinfo sup.filter(category=UserWarning, module=cpuinfo) # See #7949: Filter out deprecation warnings due to the -3 flag to # python 2 if sys.version_info.major == 2 and sys.py3kwarning: # This is very specific, so using the fragile module filter # is fine import threading sup.filter(DeprecationWarning, r"sys\.exc_clear\(\) not supported in 3\.x", module=threading) sup.filter(DeprecationWarning, message="in 3\.x, __setslice__") sup.filter(DeprecationWarning, message="in 3\.x, __getslice__") sup.filter(DeprecationWarning, message="buffer\(\) not supported in 3\.x") sup.filter(DeprecationWarning, message="CObject type is not supported in 3\.x") sup.filter(DeprecationWarning, message="comparing unequal types not supported in 3\.x") # Filter out some deprecation warnings inside nose 1.3.7 when run # on python 3.5b2. See # https://github.com/nose-devs/nose/issues/929 # Note: it is hard to filter based on module for sup (lineno could # be implemented). 
warnings.filterwarnings("ignore", message=".*getargspec.*", category=DeprecationWarning, module="nose\.") from .noseclasses import NumpyTestProgram argv, plugins = self.prepare_test_args( label, verbose, extra_argv, doctests, coverage) t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) return t.result def bench(self, label='fast', verbose=1, extra_argv=None): """ Run benchmarks for module using nose. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the benchmarks to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow benchmarks as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. attribute_identifier - string passed directly to nosetests as '-A'. verbose : int, optional Verbosity value for benchmark outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. Returns ------- success : bool Returns True if running the benchmarks works, False if an error occurred. Notes ----- Benchmarks are like tests, but have names starting with "bench" instead of "test", and can be found under the "benchmarks" sub-directory of the module. Each NumPy module exposes `bench` in its namespace to run all benchmarks for it. Examples -------- >>> success = np.lib.bench() #doctest: +SKIP Running benchmarks for numpy.lib ... using 562341 items: unique: 0.11 unique1d: 0.11 ratio: 1.0 nUnique: 56230 == 56230 ... OK >>> success #doctest: +SKIP True """ print("Running benchmarks for %s" % self.package_name) self._show_system_info() argv = self._test_argv(label, verbose, extra_argv) argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] # import nose or make informative error nose = import_nose() # get plugin to disable doctests from .noseclasses import Unplugger add_plugins = [Unplugger('doctest')] return nose.run(argv=argv, addplugins=add_plugins) def _numpy_tester(): if hasattr(np, "__version__") and ".dev0" in np.__version__: mode = "develop" else: mode = "release" return NoseTester(raise_warnings=mode, depth=1)
# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['eseries', 'esval', 'esspec', 'estidy'] import numpy as np import scipy.sparse as sp from qutip.qobj import Qobj class eseries(): """ Class representation of an exponential-series expansion of time-dependent quantum objects. Attributes ---------- ampl : ndarray Array of amplitudes for exponential series. rates : ndarray Array of rates for exponential series. dims : list Dimensions of exponential series components shape : list Shape corresponding to exponential series components Methods ------- value(tlist) Evaluate an exponential series at the times listed in tlist spec(wlist) Evaluate the spectrum of an exponential series at frequencies in wlist. 
tidyup() Returns a tidier version of the exponential series """ __array_priority__ = 101 def __init__(self, q=np.array([], dtype=object), s=np.array([])): if isinstance(s, (int, float, complex)): s = np.array([s]) if (not np.any(np.asarray(q, dtype=object))) and (len(s) == 0): self.ampl = np.array([]) self.rates = np.array([]) self.dims = [[1, 1]] self.shape = [1, 1] elif np.any(np.asarray(q, dtype=object)) and (len(s) == 0): if isinstance(q, eseries): self.ampl = q.ampl self.rates = q.rates self.dims = q.dims self.shape = q.shape elif isinstance(q, (np.ndarray, list)): ind = np.shape(q) num = ind[0] # number of elements in q sh = np.array([Qobj(x).shape for x in q]) if any(sh != sh[0]): raise TypeError('All amplitudes must have same dimension.') self.ampl = np.array([x for x in q]) self.rates = np.zeros(ind) self.dims = self.ampl[0].dims self.shape = self.ampl[0].shape elif isinstance(q, Qobj): qo = Qobj(q) self.ampl = np.array([qo]) self.rates = np.array([0]) self.dims = qo.dims self.shape = qo.shape else: self.ampl = np.array([q]) self.rates = np.array([0]) self.dims = [[1, 1]] self.shape = [1, 1] elif np.any(np.asarray(q, dtype=object)) and len(s) != 0: if isinstance(q, (np.ndarray, list)): q = np.asarray(q, dtype=object) ind = np.shape(q) num = ind[0] sh = np.array([Qobj(q[x]).shape for x in range(0, num)]) if np.any(sh != sh[0]): raise TypeError('All amplitudes must have same dimension.') self.ampl = np.array([Qobj(q[x]) for x in range(0, num)], dtype=object) self.dims = self.ampl[0].dims self.shape = self.ampl[0].shape else: num = 1 self.ampl = np.array([Qobj(q)], dtype=object) self.dims = self.ampl[0].dims self.shape = self.ampl[0].shape if isinstance(s, (int, complex, float)): if num != 1: raise TypeError('Number of rates must match number ' + 'of members in object array.') self.rates = np.array([s]) elif isinstance(s, (np.ndarray, list)): if len(s) != num: raise TypeError('Number of rates must match number ' + ' of members in object array.') self.rates = np.array(s) if len(self.ampl) != 0: # combine arrays so that they can be sorted together zipped = list(zip(self.rates, self.ampl)) zipped.sort() # sort rates from lowest to highest rates, ampl = list(zip(*zipped)) # get back rates and ampl self.ampl = np.array(ampl, dtype=object) self.rates = np.array(rates) def __str__(self): # string of ESERIES information self.tidyup() s = "ESERIES object: " + str(len(self.ampl)) + " terms\n" s += "Hilbert space dimensions: " + str(self.dims) + "\n" for k in range(0, len(self.ampl)): s += "Exponent #" + str(k) + " = " + str(self.rates[k]) + "\n" if isinstance(self.ampl[k], sp.spmatrix): s += str(self.ampl[k]) + "\n" else: s += str(self.ampl[k]) + "\n" return s def __repr__(self): return self.__str__() # Addition with ESERIES on left (ex. ESERIES+5) def __add__(self, other): right = eseries(other) if self.dims != right.dims: raise TypeError("Incompatible operands for ESERIES addition") out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = np.append(self.ampl, right.ampl) out.rates = np.append(self.rates, right.rates) return out # Addition with ESERIES on right(ex. 5+ESERIES) def __radd__(self, other): return self + other # define negation of ESERIES def __neg__(self): out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = -self.ampl out.rates = self.rates return out # Subtraction with ESERIES on left (ex. ESERIES-5) def __sub__(self, other): return self + (-other) # Subtraction with ESERIES on right (ex. 
5-ESERIES) def __rsub__(self, other): return other + (-self) # Multiplication with ESERIES on left (ex. ESERIES*other) def __mul__(self, other): if isinstance(other, eseries): out = eseries() out.dims = self.dims out.shape = self.shape for i in range(len(self.rates)): for j in range(len(other.rates)): out += eseries(self.ampl[i] * other.ampl[j], self.rates[i] + other.rates[j]) return out else: out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = self.ampl * other out.rates = self.rates return out # Multiplication with ESERIES on right (ex. other*ESERIES) def __rmul__(self, other): out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = other * self.ampl out.rates = self.rates return out # # todo: # select_ampl, select_rate: functions to select some terms given the ampl # or rate. This is done with {ampl} or (rate) in qotoolbox. we should use # functions with descriptive names for this. # # # evaluate the eseries for a list of times # def value(self, tlist): """ Evaluates an exponential series at the times listed in ``tlist``. Parameters ---------- tlist : ndarray Times at which to evaluate exponential series. Returns ------- val_list : ndarray Values of exponential at times in ``tlist``. """ if self.ampl is None or len(self.ampl) == 0: # no terms, evalue to zero return np.zeros(np.shape(tlist)) if isinstance(tlist, float) or isinstance(tlist, int): tlist = [tlist] if isinstance(self.ampl[0], Qobj): # amplitude vector contains quantum objects val_list = [] for j in range(len(tlist)): exp_factors = np.exp(np.array(self.rates) * tlist[j]) val = 0 for i in range(len(self.ampl)): val += self.ampl[i] * exp_factors[i] val_list.append(val) val_list = np.array(val_list, dtype=object) else: # the amplitude vector contains c numbers val_list = np.zeros(np.size(tlist), dtype=complex) for j in range(len(tlist)): exp_factors = np.exp(np.array(self.rates) * tlist[j]) val_list[j] = np.sum(np.dot(self.ampl, exp_factors)) if all(np.imag(val_list) == 0): val_list = np.real(val_list) if len(tlist) == 1: return val_list[0] else: return val_list def spec(self, wlist): """ Evaluate the spectrum of an exponential series at frequencies in ``wlist``. Parameters ---------- wlist : array_like Array/list of frequenies. Returns ------- val_list : ndarray Values of exponential series at frequencies in ``wlist``. """ val_list = np.zeros(np.size(wlist)) for i in range(len(wlist)): val_list[i] = 2 * np.real( np.dot(self.ampl, 1. / (1.0j * wlist[i] - self.rates))) return val_list def tidyup(self, *args): """ Returns a tidier version of exponential series. 
""" # # combine duplicate entries (same rate) # rate_tol = 1e-10 ampl_tol = 1e-10 ampl_dict = {} unique_rates = {} ur_len = 0 for r_idx in range(len(self.rates)): # look for a matching rate in the list of unique rates idx = -1 for ur_key in unique_rates.keys(): if abs(self.rates[r_idx] - unique_rates[ur_key]) < rate_tol: idx = ur_key break if idx == -1: # no matching rate, add it unique_rates[ur_len] = self.rates[r_idx] ampl_dict[ur_len] = [self.ampl[r_idx]] ur_len = len(unique_rates) else: # found matching rate, append amplitude to its list ampl_dict[idx].append(self.ampl[r_idx]) # create new amplitude and rate list with only unique rates, and # nonzero amplitudes self.rates = np.array([]) self.ampl = np.array([]) for ur_key in unique_rates.keys(): total_ampl = np.sum(np.asarray(ampl_dict[ur_key], dtype=object)) if (isinstance(total_ampl, float) or isinstance(total_ampl, complex)): if abs(total_ampl) > ampl_tol: self.rates = np.append(self.rates, unique_rates[ur_key]) self.ampl = np.append(self.ampl, total_ampl) else: if abs(total_ampl.full()).max() > ampl_tol: self.rates = np.append(self.rates, unique_rates[ur_key]) self.ampl = np.append(self.ampl, np.asarray(total_ampl, dtype=object)) return self # ----------------------------------------------------------------------------- # # wrapper functions for accessing the class methods (for compatibility with # quantum optics toolbox) # def esval(es, tlist): """ Evaluates an exponential series at the times listed in ``tlist``. Parameters ---------- tlist : ndarray Times at which to evaluate exponential series. Returns ------- val_list : ndarray Values of exponential at times in ``tlist``. """ return es.value(tlist) def esspec(es, wlist): """Evaluate the spectrum of an exponential series at frequencies in ``wlist``. Parameters ---------- wlist : array_like Array/list of frequenies. Returns ------- val_list : ndarray Values of exponential series at frequencies in ``wlist``. """ return es.spec(wlist) def estidy(es, *args): """ Returns a tidier version of exponential series. """ return es.tidyup()
#!/usr/bin/python """ Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import gzip import json import logging import os import subprocess from selenium import webdriver from wpt_test_info import WptTest from etw import ETW from recorder import WptRecord PAGE_DATA_SCRIPT = """ var pageData = {}; var domCount = document.documentElement.getElementsByTagName("*").length; if (domCount === undefined) domCount = 0; pageData["domElements"] = domCount; function addTime(name, field) { if (field == undefined) field = name; try { if (window.performance.timing[field] > 0) { pageData[name] = Math.max(0, Math.round(window.performance.timing[field] - window.performance.timing["navigationStart"])); } } catch(e) {} }; addTime("domInteractive"); addTime("domContentLoadedEventStart"); addTime("domContentLoadedEventEnd"); addTime("loadEventStart"); addTime("loadEventEnd"); addTime("firstPaint", "msFirstPaint"); return pageData; """ USER_TIMING_SCRIPT = """ var m = []; try { var marks = window.performance.getEntriesByType("mark"); if (marks.length) { for (var i = 0; i < marks.length; i++) m.push({"type": "mark", "entryType": marks[i].entryType, "name": marks[i].name, "startTime": marks[i].startTime}); } } catch(e) {}; try { var measures = window.performance.getEntriesByType("measure"); if (measures.length) { for (var i = 0; i < measures.length; i++) m.push({"type": "measure", "entryType": measures[i].entryType, "name": measures[i].name, "startTime": measures[i].startTime, "duration": measures[i].duration}); } } catch(e) {}; return m; """ def RunTest(driver, test): global PAGE_DATA_SCRIPT global USER_TIMING_SCRIPT # Set up the timeouts and other options driver.set_page_load_timeout(test.GetTimeout()) driver.set_window_position(0, 0, driver.current_window_handle) driver.set_window_size(test.BrowserWidth(), test.BrowserHeight(), driver.current_window_handle) # Prepare the recorder recorder = WptRecord() recorder.Prepare(test) #start ETW logging etw = ETW() etw_file = test.GetFileETW() try: etw.Start(etw_file) except: pass # Start Recording recorder.Start() # Run through all of the script commands (just navigate for now but placeholder) while not test.Done(): action = test.GetNextCommand() try: if action['command'] == 'navigate': driver.get(action['target']) except: pass # Wait for idle if it is not an onload-ending test if not test.EndAtOnLoad(): recorder.WaitForIdle(30) # Stop Recording recorder.Stop() try: etw.Stop() except: pass # Pull metrics from the DOM dom_data = None try: dom_data = driver.execute_script(PAGE_DATA_SCRIPT) logging.debug('Navigation Timing: {0}'.format(json.dumps(dom_data))) except: pass # check for any user timing marks or measures try: user_timing_file = test.GetFileUserTiming() if user_timing_file is not None: if os.path.exists(user_timing_file): os.unlink(user_timing_file) if os.path.exists(user_timing_file + '.gz'): os.unlink(user_timing_file + '.gz') user_timing = driver.execute_script(USER_TIMING_SCRIPT) if user_timing is not None: with gzip.open(user_timing_file + '.gz', 
'wb') as f: json.dump(user_timing, f) except: pass # collect custom metrics try: custom_metric_scripts = test.GetCustomMetrics() custom_metrics_file = test.GetFileCustomMetrics() if custom_metric_scripts is not None and custom_metrics_file is not None: if os.path.exists(custom_metrics_file): os.unlink(custom_metrics_file) if os.path.exists(custom_metrics_file + '.gz'): os.unlink(custom_metrics_file + '.gz') custom_metrics = None for metric in custom_metric_scripts: script = custom_metric_scripts[metric] result = driver.execute_script(script) if result is not None: if custom_metrics is None: custom_metrics = {} custom_metrics[metric] = result if custom_metrics is not None: with gzip.open(custom_metrics_file + '.gz', 'wb') as f: json.dump(custom_metrics, f) except: pass # grab a screen shot try: png = test.GetScreenshotPNG() if png is not None: if os.path.exists(png): os.unlink(png) driver.get_screenshot_as_file(png) jpeg = test.GetScreenshotJPEG() quality = test.GetImageQuality() if jpeg is not None and os.path.exists(png): command = 'magick "{0}" -set colorspace sRGB -quality {1:d} "{2}"'.format(png, quality, jpeg) subprocess.call(command, shell=True) if os.path.exists(jpeg) and not test.KeepPNG(): os.unlink(png) except: pass # process the etw trace start_offset = 0 try: start_offset = etw.Write(test, dom_data) except: pass if os.path.exists(etw_file): os.unlink(etw_file) # Process the recording print('Processing video capture') recorder.Process(start_offset) recorder.Done() def main(): import argparse parser = argparse.ArgumentParser(description='Chrome trace parser.', prog='trace-parser') parser.add_argument('-v', '--verbose', action='count', help="Increase verbosity (specify multiple times for more). -vvvv for full debug output.") parser.add_argument('-t', '--test', help="Input test json file.") parser.add_argument('-r', '--recorder', help="Path to wptrecord.exe for recording video, tcpdump, etc.") options, unknown = parser.parse_known_args() # Set up logging log_level = logging.CRITICAL if options.verbose == 1: log_level = logging.ERROR elif options.verbose == 2: log_level = logging.WARNING elif options.verbose == 3: log_level = logging.INFO elif options.verbose >= 4: log_level = logging.DEBUG logging.basicConfig(level=log_level, format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S") if not options.test: parser.error("Input test file is not specified.") test = WptTest(options.test) if options.recorder: test.SetRecorder(options.recorder) #Start the browser exe = os.path.join(os.path.dirname(os.path.abspath(__file__)), "edge/MicrosoftWebDriver.exe") driver = webdriver.Edge(executable_path=exe) driver.get("about:blank") RunTest(driver, test) #quit the browser driver.quit() if '__main__' == __name__: # import cProfile # cProfile.run('main()', None, 2) main()
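# Invocation sketch for the runner above: main() is driven from the command
# line with a test-description JSON and, optionally, the recorder binary.
# The script file name and paths below are placeholders.
import subprocess

subprocess.call([
    'python', 'edge_driver.py',                # hypothetical name for this module
    '--test', r'C:\wpt\work\test.json',        # -t: input test json file
    '--recorder', r'C:\wpt\wptrecord.exe',     # -r: wptrecord.exe for video/tcpdump
    '-vvv',                                    # raise logging verbosity to INFO
])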
# apis_v1/views/views_position.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- from config.base import get_environment_variable from django.http import HttpResponse import json from ballot.controllers import figure_out_google_civic_election_id_voter_is_watching from ballot.models import OFFICE, CANDIDATE, MEASURE from position.controllers import position_list_for_ballot_item_for_api, \ position_list_for_ballot_item_from_friends_for_api, \ position_list_for_opinion_maker_for_api, \ position_list_for_voter_for_api, \ position_retrieve_for_api, position_save_for_api from position.models import ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING, \ FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC from support_oppose_deciding.controllers import position_oppose_count_for_ballot_item_for_api, \ position_support_count_for_ballot_item_for_api, \ position_public_oppose_count_for_ballot_item_for_api, \ position_public_support_count_for_ballot_item_for_api import wevote_functions.admin from wevote_functions.functions import convert_to_bool, get_voter_device_id, \ is_speaker_type_organization, is_speaker_type_public_figure, positive_value_exists logger = wevote_functions.admin.get_logger(__name__) WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL") def position_list_for_ballot_item_view(request): # positionListForBallotItem """ :param request: :return: """ stance = request.GET.get('stance', ANY_STANCE) if stance in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING): stance_we_are_looking_for = stance else: stance_we_are_looking_for = ANY_STANCE kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "") ballot_item_id = request.GET.get('ballot_item_id', 0) ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', "") private_citizens_only = positive_value_exists(request.GET.get('private_citizens_only', False)) if kind_of_ballot_item == OFFICE: office_id = ballot_item_id office_we_vote_id = ballot_item_we_vote_id candidate_id = 0 candidate_we_vote_id = '' measure_id = 0 measure_we_vote_id = '' elif kind_of_ballot_item == CANDIDATE: office_id = 0 office_we_vote_id = '' candidate_id = ballot_item_id candidate_we_vote_id = ballot_item_we_vote_id measure_id = 0 measure_we_vote_id = '' elif kind_of_ballot_item == MEASURE: office_id = 0 office_we_vote_id = '' candidate_id = 0 candidate_we_vote_id = '' measure_id = ballot_item_id measure_we_vote_id = ballot_item_we_vote_id else: office_id = 0 office_we_vote_id = '' candidate_id = 0 candidate_we_vote_id = '' measure_id = 0 measure_we_vote_id = '' return position_list_for_ballot_item_for_api(office_id=office_id, office_we_vote_id=office_we_vote_id, candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id, measure_id=measure_id, measure_we_vote_id=measure_we_vote_id, stance_we_are_looking_for=stance_we_are_looking_for, private_citizens_only=private_citizens_only) def position_list_for_ballot_item_from_friends_view(request): # positionListForBallotItemFromFriends """ :param request: :return: """ voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id stance = request.GET.get('stance', ANY_STANCE) if stance in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING): stance_we_are_looking_for = stance else: stance_we_are_looking_for = ANY_STANCE friends_vs_public_incoming = request.GET.get('friends_vs_public', FRIENDS_AND_PUBLIC) if 
friends_vs_public_incoming in (FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC): friends_vs_public = friends_vs_public_incoming else: friends_vs_public = FRIENDS_AND_PUBLIC kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "") ballot_item_id = request.GET.get('ballot_item_id', 0) ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', "") candidate_id = 0 candidate_we_vote_id = '' measure_id = 0 measure_we_vote_id = '' office_id = 0 office_we_vote_id = '' if kind_of_ballot_item == OFFICE: office_id = ballot_item_id office_we_vote_id = ballot_item_we_vote_id elif kind_of_ballot_item == CANDIDATE: candidate_id = ballot_item_id candidate_we_vote_id = ballot_item_we_vote_id elif kind_of_ballot_item == MEASURE: measure_id = ballot_item_id measure_we_vote_id = ballot_item_we_vote_id return position_list_for_ballot_item_from_friends_for_api( voter_device_id=voter_device_id, friends_vs_public=friends_vs_public, office_id=office_id, office_we_vote_id=office_we_vote_id, candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id, measure_id=measure_id, measure_we_vote_id=measure_we_vote_id, stance_we_are_looking_for=stance_we_are_looking_for) def position_list_for_opinion_maker_view(request): # positionListForOpinionMaker """ :param request: :return: """ voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id stance = request.GET.get('stance', ANY_STANCE) if stance in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING): stance_we_are_looking_for = stance else: stance_we_are_looking_for = ANY_STANCE friends_vs_public_incoming = request.GET.get('friends_vs_public', ANY_STANCE) if friends_vs_public_incoming in (FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC): friends_vs_public = friends_vs_public_incoming else: friends_vs_public = FRIENDS_AND_PUBLIC kind_of_opinion_maker = request.GET.get('kind_of_opinion_maker', "") opinion_maker_id = request.GET.get('opinion_maker_id', 0) opinion_maker_we_vote_id = request.GET.get('opinion_maker_we_vote_id', "") google_civic_election_id = request.GET.get('google_civic_election_id', '') # The Position tables use str state_code = request.GET.get('state_code', "") filter_for_voter = positive_value_exists(request.GET.get('filter_for_voter', True)) filter_out_voter = positive_value_exists(request.GET.get('filter_out_voter', False)) # Make sure filter_for_voter is reset to False if filter_out_voter is true filter_for_voter = False if filter_out_voter else filter_for_voter if is_speaker_type_organization(kind_of_opinion_maker): organization_id = opinion_maker_id organization_we_vote_id = opinion_maker_we_vote_id public_figure_id = 0 public_figure_we_vote_id = '' elif is_speaker_type_public_figure(kind_of_opinion_maker): organization_id = 0 organization_we_vote_id = '' public_figure_id = opinion_maker_id public_figure_we_vote_id = opinion_maker_we_vote_id else: organization_id = 0 organization_we_vote_id = '' public_figure_id = 0 public_figure_we_vote_id = '' json_data = position_list_for_opinion_maker_for_api( voter_device_id=voter_device_id, organization_id=organization_id, organization_we_vote_id=organization_we_vote_id, public_figure_id=public_figure_id, public_figure_we_vote_id=public_figure_we_vote_id, friends_vs_public=friends_vs_public, stance_we_are_looking_for=stance_we_are_looking_for, filter_for_voter=filter_for_voter, filter_out_voter=filter_out_voter, google_civic_election_id=google_civic_election_id, state_code=state_code) return 
HttpResponse(json.dumps(json_data), content_type='application/json') def position_list_for_voter_view(request): # positionListForVoter """ :param request: :return: """ voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id stance = request.GET.get('stance', ANY_STANCE) if stance in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING): stance_we_are_looking_for = stance else: stance_we_are_looking_for = ANY_STANCE friends_vs_public_incoming = request.GET.get('friends_vs_public', ANY_STANCE) if friends_vs_public_incoming in (FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC): friends_vs_public = friends_vs_public_incoming else: friends_vs_public = FRIENDS_AND_PUBLIC google_civic_election_id = request.GET.get('google_civic_election_id', 0) state_code = request.GET.get('state_code', "") show_only_this_election = request.GET.get('show_only_this_election', True) show_only_this_election = positive_value_exists(show_only_this_election) show_all_other_elections = request.GET.get('show_all_other_elections', False) show_all_other_elections = positive_value_exists(show_all_other_elections) # Make sure show_only_this_election is reset to False if filter_out_voter is true show_only_this_election = False if show_all_other_elections else show_only_this_election if show_only_this_election or show_all_other_elections and not positive_value_exists(google_civic_election_id): results = figure_out_google_civic_election_id_voter_is_watching(voter_device_id) google_civic_election_id = results['google_civic_election_id'] return position_list_for_voter_for_api(voter_device_id=voter_device_id, friends_vs_public=friends_vs_public, stance_we_are_looking_for=stance_we_are_looking_for, show_only_this_election=show_only_this_election, show_all_other_elections=show_all_other_elections, google_civic_election_id=google_civic_election_id, state_code=state_code) def position_retrieve_view(request): """ Retrieve all of the details about a single position based on unique identifier (positionRetrieve) :param request: :return: """ voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id position_we_vote_id = request.GET.get('position_we_vote_id', '') return position_retrieve_for_api( position_we_vote_id=position_we_vote_id, voter_device_id=voter_device_id ) def position_save_view(request): # positionSave """ Save a single position :param request: :return: """ # We set values that aren't passed in, to False so we know to treat them as null or unchanged. 
This allows us to # only change the values we want to voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id position_we_vote_id = request.GET.get('position_we_vote_id', False) organization_we_vote_id = request.GET.get('organization_we_vote_id', False) public_figure_we_vote_id = request.GET.get('public_figure_we_vote_id', False) voter_we_vote_id = request.GET.get('voter_we_vote_id', False) google_civic_election_id = request.GET.get('google_civic_election_id', False) ballot_item_display_name = request.GET.get('ballot_item_display_name', False) office_we_vote_id = request.GET.get('office_we_vote_id', False) candidate_we_vote_id = request.GET.get('candidate_we_vote_id', False) measure_we_vote_id = request.GET.get('measure_we_vote_id', False) stance = request.GET.get('stance', False) set_as_public_position = request.GET.get('set_as_public_position', True) statement_text = request.GET.get('statement_text', False) statement_html = request.GET.get('statement_html', False) more_info_url = request.GET.get('more_info_url', False) results = position_save_for_api( voter_device_id=voter_device_id, position_we_vote_id=position_we_vote_id, organization_we_vote_id=organization_we_vote_id, public_figure_we_vote_id=public_figure_we_vote_id, voter_we_vote_id=voter_we_vote_id, google_civic_election_id=google_civic_election_id, ballot_item_display_name=ballot_item_display_name, office_we_vote_id=office_we_vote_id, candidate_we_vote_id=candidate_we_vote_id, measure_we_vote_id=measure_we_vote_id, stance=stance, set_as_public_position=set_as_public_position, statement_text=statement_text, statement_html=statement_html, more_info_url=more_info_url, ) return HttpResponse(json.dumps(results), content_type='application/json') def position_oppose_count_for_ballot_item_view(request): """ Retrieve the number of orgs and friends that oppose this (positionOpposeCountForBallotItem) :param request: :return: """ voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "") ballot_item_id = request.GET.get('ballot_item_id', 0) ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None) if kind_of_ballot_item == CANDIDATE: candidate_id = ballot_item_id candidate_we_vote_id = ballot_item_we_vote_id measure_id = 0 measure_we_vote_id = None elif kind_of_ballot_item == MEASURE: candidate_id = 0 candidate_we_vote_id = None measure_id = ballot_item_id measure_we_vote_id = ballot_item_we_vote_id else: candidate_id = 0 candidate_we_vote_id = None measure_id = 0 measure_we_vote_id = None return position_oppose_count_for_ballot_item_for_api( voter_device_id=voter_device_id, candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id, measure_id=measure_id, measure_we_vote_id=measure_we_vote_id) def position_support_count_for_ballot_item_view(request): """ Retrieve the number of orgs and friends that support this (positionSupportCountForBallotItem) :param request: :return: """ voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "") ballot_item_id = request.GET.get('ballot_item_id', 0) ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None) if kind_of_ballot_item == CANDIDATE: candidate_id = ballot_item_id candidate_we_vote_id = ballot_item_we_vote_id measure_id = 0 measure_we_vote_id = None elif kind_of_ballot_item == MEASURE: candidate_id = 0 
        candidate_we_vote_id = None
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    else:
        candidate_id = 0
        candidate_we_vote_id = None
        measure_id = 0
        measure_we_vote_id = None
    return position_support_count_for_ballot_item_for_api(
        voter_device_id=voter_device_id,
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)


def position_public_oppose_count_for_ballot_item_view(request):
    """
    Retrieve the number of orgs and public figures that publicly oppose this
    (positionPublicOpposeCountForBallotItem)
    :param request:
    :return:
    """
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    if kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
        measure_id = 0
        measure_we_vote_id = None
    elif kind_of_ballot_item == MEASURE:
        candidate_id = 0
        candidate_we_vote_id = None
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    else:
        candidate_id = 0
        candidate_we_vote_id = None
        measure_id = 0
        measure_we_vote_id = None
    return position_public_oppose_count_for_ballot_item_for_api(
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)


def position_public_support_count_for_ballot_item_view(request):
    """
    Retrieve the number of orgs and public figures that publicly support this
    (positionPublicSupportCountForBallotItem)
    :param request:
    :return:
    """
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    if kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
        measure_id = 0
        measure_we_vote_id = None
    elif kind_of_ballot_item == MEASURE:
        candidate_id = 0
        candidate_we_vote_id = None
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    else:
        candidate_id = 0
        candidate_we_vote_id = None
        measure_id = 0
        measure_we_vote_id = None
    return position_public_support_count_for_ballot_item_for_api(
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
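

# Hedged refactoring sketch (not part of the original module): the count views
# above repeat the same kind_of_ballot_item -> candidate/measure mapping. A
# small helper like this could factor that out; the name
# extract_ballot_item_ids is hypothetical. CANDIDATE and MEASURE are the
# constants the views above already use.
def extract_ballot_item_ids(kind_of_ballot_item, ballot_item_id, ballot_item_we_vote_id):
    """Return (candidate_id, candidate_we_vote_id, measure_id, measure_we_vote_id)."""
    if kind_of_ballot_item == CANDIDATE:
        return ballot_item_id, ballot_item_we_vote_id, 0, None
    if kind_of_ballot_item == MEASURE:
        return 0, None, ballot_item_id, ballot_item_we_vote_id
    return 0, None, 0, None

# A view could then unpack the tuple in a single statement, e.g.:
#   candidate_id, candidate_we_vote_id, measure_id, measure_we_vote_id = \
#       extract_ballot_item_ids(kind_of_ballot_item, ballot_item_id, ballot_item_we_vote_id)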
# coding=utf-8 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (c) 2012, Intel Performance Learning Solutions Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ VM related 'glue' :-) """ #pylint: disable=R0914,W0142,R0912,R0915 from nova import compute from nova import utils from nova.compute import task_states from nova.compute import vm_states from nova.compute import flavors from nova.openstack.common import log from occi import exceptions from occi.extensions import infrastructure from occi_os_api.extensions import os_mixins from occi_os_api.extensions import os_addon COMPUTE_API = compute.API() LOG = log.getLogger(__name__) def create_vm(entity, context): """ Create a VM for an given OCCI entity. entity -- the OCCI resource entity. context -- the os context. """ # TODO: needs major overhaul! if 'occi.compute.hostname' in entity.attributes: name = entity.attributes['occi.compute.hostname'] else: name = None key_name = key_data = None password = utils.generate_password() access_ip_v4 = None access_ip_v6 = None user_data = None metadata = {} injected_files = [] min_count = max_count = 1 requested_networks = None sg_names = [] availability_zone = None config_drive = None block_device_mapping = [] kernel_id = ramdisk_id = None auto_disk_config = None scheduler_hints = None resource_template = None os_template = None for mixin in entity.mixins: if isinstance(mixin, os_mixins.ResourceTemplate): resource_template = mixin elif isinstance(mixin, os_mixins.OsTemplate): os_template = mixin elif mixin == os_addon.OS_KEY_PAIR_EXT: attr = 'org.openstack.credentials.publickey.name' key_name = entity.attributes[attr] attr = 'org.openstack.credentials.publickey.data' key_data = entity.attributes[attr] elif mixin == os_addon.OS_USER_DATA_EXT: attr = 'org.openstack.compute.user_data' user_data = entity.attributes[attr] # Look for security group. If the group is non-existant, the # call to create will fail. 
if os_addon.SEC_GROUP in mixin.related: secgroup = COMPUTE_API.security_group_api.get(context, name=mixin.term) sg_names.append(secgroup["name"]) if not os_template: raise AttributeError('Please provide a valid OS Template.') if resource_template: inst_type = flavors.get_flavor_by_flavor_id(resource_template.res_id) else: inst_type = None # Create block device mapping for link in entity.links: if not 'occi.storagelink.state' in link.attributes: continue mapping = { 'volume_id': link.target.attributes['occi.core.id'], 'delete_on_termination': '0', } device_id = link.attributes.get('occi.storagelink.deviceid') if device_id: mapping['device_name'] = device_id block_device_mapping.append(mapping) # make the call try: (instances, _reservation_id) = COMPUTE_API.create( context=context, instance_type=inst_type, image_href=os_template.os_id, kernel_id=kernel_id, ramdisk_id=ramdisk_id, min_count=min_count, max_count=max_count, display_name=name, display_description=name, key_name=key_name, key_data=key_data, security_group=sg_names, availability_zone=availability_zone, user_data=user_data, metadata=metadata, injected_files=injected_files, admin_password=password, block_device_mapping=block_device_mapping, access_ip_v4=access_ip_v4, access_ip_v6=access_ip_v6, requested_networks=requested_networks, config_drive=config_drive, auto_disk_config=auto_disk_config, scheduler_hints=scheduler_hints) except Exception as e: raise AttributeError(e.message) # return first instance return instances[0] def rebuild_vm(uid, image_href, context): """ Rebuilds the specified VM with the supplied OsTemplate mixin. uid -- id of the instance image_href -- image reference. context -- the os context """ instance = get_vm(uid, context) admin_password = utils.generate_password() kwargs = {} try: COMPUTE_API.rebuild(context, instance, image_href, admin_password, **kwargs) except Exception as e: raise AttributeError(e.message) def resize_vm(uid, flavor_id, context): """ Resizes a VM up or down Update: libvirt now supports resize see: http://wiki.openstack.org/HypervisorSupportMatrix uid -- id of the instance flavor_id -- image reference. context -- the os context """ instance = get_vm(uid, context) kwargs = {} try: flavor = flavors.get_flavor_by_flavor_id(flavor_id) COMPUTE_API.resize(context, instance, flavor_id=flavor['flavorid'], **kwargs) ready = False i = 0 # XXX are 15 secs enough to resize? while not ready and i < 15: i += 1 state = get_vm(uid, context)['vm_state'] if state == 'resized': ready = True import time time.sleep(1) instance = get_vm(uid, context) COMPUTE_API.confirm_resize(context, instance) except Exception as e: raise AttributeError(str(e)) def delete_vm(uid, context): """ Destroy a VM. uid -- id of the instance context -- the os context """ try: instance = get_vm(uid, context) COMPUTE_API.delete(context, instance) except Exception as error: raise exceptions.HTTPError(500, str(error)) def suspend_vm(uid, context): """ Suspends a VM. Use the start action to unsuspend a VM. uid -- id of the instance context -- the os context """ instance = get_vm(uid, context) try: COMPUTE_API.pause(context, instance) except Exception as error: raise exceptions.HTTPError(500, str(error)) def snapshot_vm(uid, image_name, context): """ Snapshots a VM. Use the start action to unsuspend a VM. 
uid -- id of the instance image_name -- name of the new image context -- the os context """ instance = get_vm(uid, context) try: COMPUTE_API.snapshot(context, instance, image_name) except Exception as e: raise AttributeError(e.message) def start_vm(uid, context): """ Starts a vm that is in the stopped state. Note, currently we do not use the nova start and stop, rather the resume/suspend methods. The start action also unpauses a paused VM. uid -- id of the instance state -- the state the VM is in (str) context -- the os context """ instance = get_vm(uid, context) try: if instance['vm_state'] in [vm_states.PAUSED]: COMPUTE_API.unpause(context, instance) elif instance['vm_state'] in [vm_states.SUSPENDED]: COMPUTE_API.resume(context, instance) # the following will probably not happen, as COMPUTE_API.stop() # is never called. elif instance['vm_state'] in [vm_states.STOPPED]: COMPUTE_API.start(context, instance) else: raise exceptions.HTTPError(500, ("Unable to map start to " "appropriate OS action.")) except exceptions.HTTPError as e: raise e except Exception as e: raise AttributeError(e.message) def stop_vm(uid, context): """ Stops a VM. Rather than use stop, suspend is used. OCCI -> graceful, acpioff, poweroff OS -> unclear uid -- id of the instance context -- the os context """ instance = get_vm(uid, context) try: COMPUTE_API.suspend(context, instance) except Exception as e: raise AttributeError(e.message) def restart_vm(uid, method, context): """ Restarts a VM. OS types == SOFT, HARD OCCI -> graceful, warm and cold mapping: - SOFT -> graceful, warm - HARD -> cold uid -- id of the instance method -- how the machine should be restarted. context -- the os context """ instance = get_vm(uid, context) if method in ('graceful', 'warm'): reboot_type = 'SOFT' elif method == 'cold': reboot_type = 'HARD' else: raise AttributeError('Unknown method.') try: COMPUTE_API.reboot(context, instance, reboot_type) except Exception as e: raise AttributeError(e.message) def attach_volume(instance_id, volume_id, device_name, context): """ Attaches a storage volume. instance_id -- Id of the VM. volume_id -- Id of the storage volume. device_name -- Where to attach. context -- The os security context. Returns the device name where the volume is attached """ instance = get_vm(instance_id, context) try: return COMPUTE_API.attach_volume(context, instance, volume_id, device_name) except Exception as e: raise AttributeError(e.message) def detach_volume(instance_id, volume, context): """ Detach a storage volume. volume -- Volume description. instance_id -- Id of the VM. context -- the os context. """ try: instance = get_vm(instance_id, context) COMPUTE_API.detach_volume(context, instance, volume) except Exception as e: raise AttributeError(e) def set_password_for_vm(uid, password, context): """ Set new password for an VM. uid -- Id of the instance. password -- The new password. context -- The os context. """ instance = get_vm(uid, context) try: COMPUTE_API.set_admin_password(context, instance, password) except Exception as e: raise AttributeError(e.message) def get_vnc(uid, context): """ Retrieve VNC console or None if unavailable. uid -- id of the instance context -- the os context """ console = None instance = get_vm(uid, context) try: console = COMPUTE_API.get_vnc_console(context, instance, 'novnc') except Exception: LOG.warn('Console info is not available atm!') finally: return console def get_vm(uid, context): """ Retrieve an VM instance from nova. 
    uid -- id of the instance
    context -- the os context
    """
    try:
        instance = COMPUTE_API.get(context, uid, want_objects=True)
    except Exception:
        raise exceptions.HTTPError(404, 'VM not found!')
    return instance


def get_vms(context):
    """
    Retrieve all VMs in a given context.
    """
    opts = {'deleted': False}
    tmp = COMPUTE_API.get_all(context, search_opts=opts)
    return tmp


def get_vm_state(uid, context):
    """
    See nova/compute/vm_states.py and nova/compute/task_states.py

    Mapping assumptions:
    - active == VM can service requests from network. These requests can
      be from users or VMs
    - inactive == the opposite! :-)
    - suspended == machine in a frozen state e.g. via suspend or pause

    uid -- Id of the VM.
    context -- the os context.
    """
    instance = get_vm(uid, context)
    state = 'inactive'
    actions = []
    if instance['vm_state'] in [vm_states.ACTIVE]:
        state = 'active'
        actions.append(infrastructure.STOP)
        actions.append(infrastructure.SUSPEND)
        actions.append(infrastructure.RESTART)
    elif instance['vm_state'] in [vm_states.BUILDING]:
        state = 'inactive'
    elif instance['vm_state'] in [vm_states.PAUSED, vm_states.SUSPENDED,
                                  vm_states.STOPPED]:
        state = 'inactive'
        actions.append(infrastructure.START)
    elif instance['vm_state'] in [vm_states.RESCUED, vm_states.ERROR,
                                  vm_states.DELETED]:
        state = 'inactive'

    # Some task states override the state (e.g. while a snapshot is taken)
    if instance['vm_state'] in [task_states.IMAGE_SNAPSHOT]:
        state = 'inactive'
        actions = []

    return state, actions


# Image management

def retrieve_image(uid, context):
    """
    Return details on an image.
    """
    try:
        return COMPUTE_API.image_service.show(context, uid)
    except Exception as e:
        raise AttributeError(e.message)


def retrieve_images(context):
    """
    Retrieve list of images.
    """
    return COMPUTE_API.image_service.detail(context)


def retrieve_flavors(context):
    """
    Retrieve list of flavors.
    """
    return flavors.get_all_flavors(context)
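

# Illustrative, self-contained sketch of the OCCI -> Nova reboot mapping that
# restart_vm() above implements inline; the dict and function names here are
# hypothetical, added only to make the mapping explicit.
OCCI_TO_NOVA_REBOOT = {
    'graceful': 'SOFT',
    'warm': 'SOFT',
    'cold': 'HARD',
}


def occi_reboot_type(method):
    """Translate an OCCI restart method into a Nova reboot type."""
    try:
        return OCCI_TO_NOVA_REBOOT[method]
    except KeyError:
        raise AttributeError('Unknown method: %r' % method)


if __name__ == '__main__':
    # Quick self-check of the mapping.
    assert occi_reboot_type('warm') == 'SOFT'
    assert occi_reboot_type('cold') == 'HARD'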
import re import unicodedata import warnings from gzip import GzipFile from htmlentitydefs import name2codepoint try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from django.utils.encoding import force_unicode from django.utils.functional import allow_lazy, SimpleLazyObject from django.utils.translation import ugettext_lazy, ugettext as _, pgettext # Capitalizes the first letter of a string. capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:] capfirst = allow_lazy(capfirst, unicode) # Set up regular expressions re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U|re.S) re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>', re.S) def wrap(text, width): """ A word-wrap function that preserves existing line breaks and most spaces in the text. Expects that existing line breaks are posix newlines. """ text = force_unicode(text) def _generator(): it = iter(text.split(' ')) word = it.next() yield word pos = len(word) - word.rfind('\n') - 1 for word in it: if "\n" in word: lines = word.split('\n') else: lines = (word,) pos += len(lines[0]) + 1 if pos > width: yield '\n' pos = len(lines[-1]) else: yield ' ' if len(lines) > 1: pos = len(lines[-1]) yield word return u''.join(_generator()) wrap = allow_lazy(wrap, unicode) class Truncator(SimpleLazyObject): """ An object used to truncate text, either by characters or words. """ def __init__(self, text): super(Truncator, self).__init__(lambda: force_unicode(text)) def add_truncation_text(self, text, truncate=None): if truncate is None: truncate = pgettext( 'String to return when truncating text', u'%(truncated_text)s...') truncate = force_unicode(truncate) if '%(truncated_text)s' in truncate: return truncate % {'truncated_text': text} # The truncation text didn't contain the %(truncated_text)s string # replacement argument so just append it to the text. if text.endswith(truncate): # But don't append the truncation text if the current text already # ends in this. return text return '%s%s' % (text, truncate) def chars(self, num, truncate=None): """ Returns the text truncated to be no longer than the specified number of characters. Takes an optional argument of what should be used to notify that the string has been truncated, defaulting to a translatable string of an ellipsis (...). """ length = int(num) text = unicodedata.normalize('NFC', self._wrapped) # Calculate the length to truncate to (max length - end_text length) truncate_len = length for char in self.add_truncation_text('', truncate): if not unicodedata.combining(char): truncate_len -= 1 if truncate_len == 0: break s_len = 0 end_index = None for i, char in enumerate(text): if unicodedata.combining(char): # Don't consider combining characters # as adding to the string length continue s_len += 1 if end_index is None and s_len > truncate_len: end_index = i if s_len > length: # Return the truncated string return self.add_truncation_text(text[:end_index or 0], truncate) # Return the original string since no truncation was necessary return text chars = allow_lazy(chars) def words(self, num, truncate=None, html=False): """ Truncates a string after a certain number of words. Takes an optional argument of what should be used to notify that the string has been truncated, defaulting to ellipsis (...). """ length = int(num) if html: return self._html_words(length, truncate) return self._text_words(length, truncate) words = allow_lazy(words) def _text_words(self, length, truncate): """ Truncates a string after a certain number of words. 
Newlines in the string will be stripped. """ words = self._wrapped.split() if len(words) > length: words = words[:length] return self.add_truncation_text(u' '.join(words), truncate) return u' '.join(words) def _html_words(self, length, truncate): """ Truncates HTML to a certain number of words (not counting tags and comments). Closes opened tags if they were correctly closed in the given HTML. Newlines in the HTML are preserved. """ if length <= 0: return u'' html4_singlets = ( 'br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input' ) # Count non-HTML words and keep note of open tags pos = 0 end_text_pos = 0 words = 0 open_tags = [] while words <= length: m = re_words.search(self._wrapped, pos) if not m: # Checked through whole string break pos = m.end(0) if m.group(1): # It's an actual non-HTML word words += 1 if words == length: end_text_pos = pos continue # Check for tag tag = re_tag.match(m.group(0)) if not tag or end_text_pos: # Don't worry about non tags or tags after our truncate point continue closing_tag, tagname, self_closing = tag.groups() # Element names are always case-insensitive tagname = tagname.lower() if self_closing or tagname in html4_singlets: pass elif closing_tag: # Check for match in open tags list try: i = open_tags.index(tagname) except ValueError: pass else: # SGML: An end tag closes, back to the matching start tag, # all unclosed intervening start tags with omitted end tags open_tags = open_tags[i + 1:] else: # Add it to the start of the open tags list open_tags.insert(0, tagname) if words <= length: # Don't try to close tags if we don't need to truncate return self._wrapped out = self._wrapped[:end_text_pos] truncate_text = self.add_truncation_text('', truncate) if truncate_text: out += truncate_text # Close any tags still open for tag in open_tags: out += '</%s>' % tag # Return string return out def truncate_words(s, num, end_text='...'): warnings.warn('This function has been deprecated. Use the Truncator class ' 'in django.utils.text instead.', category=PendingDeprecationWarning) truncate = end_text and ' %s' % end_text or '' return Truncator(s).words(num, truncate=truncate) truncate_words = allow_lazy(truncate_words, unicode) def truncate_html_words(s, num, end_text='...'): warnings.warn('This function has been deprecated. Use the Truncator class ' 'in django.utils.text instead.', category=PendingDeprecationWarning) truncate = end_text and ' %s' % end_text or '' return Truncator(s).words(num, truncate=truncate, html=True) truncate_html_words = allow_lazy(truncate_html_words, unicode) def get_valid_filename(s): """ Returns the given string converted to a string that can be used for a clean filename. Specifically, leading and trailing spaces are removed; other spaces are converted to underscores; and anything that is not a unicode alphanumeric, dash, underscore, or dot, is removed. 
>>> get_valid_filename("john's portrait in 2004.jpg") u'johns_portrait_in_2004.jpg' """ s = force_unicode(s).strip().replace(' ', '_') return re.sub(r'(?u)[^-\w.]', '', s) get_valid_filename = allow_lazy(get_valid_filename, unicode) def get_text_list(list_, last_word=ugettext_lazy(u'or')): """ >>> get_text_list(['a', 'b', 'c', 'd']) u'a, b, c or d' >>> get_text_list(['a', 'b', 'c'], 'and') u'a, b and c' >>> get_text_list(['a', 'b'], 'and') u'a and b' >>> get_text_list(['a']) u'a' >>> get_text_list([]) u'' """ if len(list_) == 0: return u'' if len(list_) == 1: return force_unicode(list_[0]) return u'%s %s %s' % ( # Translators: This string is used as a separator between list elements _(', ').join([force_unicode(i) for i in list_][:-1]), force_unicode(last_word), force_unicode(list_[-1])) get_text_list = allow_lazy(get_text_list, unicode) def normalize_newlines(text): return force_unicode(re.sub(r'\r\n|\r|\n', '\n', text)) normalize_newlines = allow_lazy(normalize_newlines, unicode) def recapitalize(text): "Recapitalizes text, placing caps after end-of-sentence punctuation." text = force_unicode(text).lower() capsRE = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])') text = capsRE.sub(lambda x: x.group(1).upper(), text) return text recapitalize = allow_lazy(recapitalize) def phone2numeric(phone): "Converts a phone number with letters into its numeric equivalent." char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9', } return u''.join(char2number.get(c, c) for c in phone.lower()) phone2numeric = allow_lazy(phone2numeric) # From http://www.xhaus.com/alan/python/httpcomp.html#gzip # Used with permission. def compress_string(s): zbuf = StringIO() zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf) zfile.write(s) zfile.close() return zbuf.getvalue() ustring_re = re.compile(u"([\u0080-\uffff])") def javascript_quote(s, quote_double_quotes=False): def fix(match): return r"\u%04x" % ord(match.group(1)) if type(s) == str: s = s.decode('utf-8') elif type(s) != unicode: raise TypeError(s) s = s.replace('\\', '\\\\') s = s.replace('\r', '\\r') s = s.replace('\n', '\\n') s = s.replace('\t', '\\t') s = s.replace("'", "\\'") if quote_double_quotes: s = s.replace('"', '&quot;') return str(ustring_re.sub(fix, s)) javascript_quote = allow_lazy(javascript_quote, unicode) # Expression to match some_token and some_token="with spaces" (and similarly # for single-quoted strings). smart_split_re = re.compile(r""" ((?: [^\s'"]* (?: (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*') [^\s'"]* )+ ) | \S+) """, re.VERBOSE) def smart_split(text): r""" Generator that splits a string by spaces, leaving quoted phrases together. Supports both single and double quotes, and supports escaping quotes with backslashes. In the output, strings will keep their initial and trailing quote marks and escaped quotes will remain escaped (the results can then be further processed with unescape_string_literal()). 
>>> list(smart_split(r'This is "a person\'s" test.')) [u'This', u'is', u'"a person\\\'s"', u'test.'] >>> list(smart_split(r"Another 'person\'s' test.")) [u'Another', u"'person\\'s'", u'test.'] >>> list(smart_split(r'A "\"funky\" style" test.')) [u'A', u'"\\"funky\\" style"', u'test.'] """ text = force_unicode(text) for bit in smart_split_re.finditer(text): yield bit.group(0) smart_split = allow_lazy(smart_split, unicode) def _replace_entity(match): text = match.group(1) if text[0] == u'#': text = text[1:] try: if text[0] in u'xX': c = int(text[1:], 16) else: c = int(text) return unichr(c) except ValueError: return match.group(0) else: try: return unichr(name2codepoint[text]) except (ValueError, KeyError): return match.group(0) _entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));") def unescape_entities(text): return _entity_re.sub(_replace_entity, text) unescape_entities = allow_lazy(unescape_entities, unicode) def unescape_string_literal(s): r""" Convert quoted string literals to unquoted strings with escaped quotes and backslashes unquoted:: >>> unescape_string_literal('"abc"') 'abc' >>> unescape_string_literal("'abc'") 'abc' >>> unescape_string_literal('"a \"bc\""') 'a "bc"' >>> unescape_string_literal("'\'ab\' c'") "'ab' c" """ if s[0] not in "\"'" or s[-1] != s[0]: raise ValueError("Not a string literal: %r" % s) quote = s[0] return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\') unescape_string_literal = allow_lazy(unescape_string_literal)
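

# Illustrative usage of the helpers defined above (Django 1.4-era API); the
# sample strings are made up, and the expected output is shown in comments.
if __name__ == '__main__':
    print(Truncator('The quick brown fox jumped over the lazy dog').words(5))
    # The quick brown fox jumped...
    print(Truncator('<p>one two three four</p>').words(2, html=True))
    # <p>one two...</p>
    print(list(smart_split('a "b c" d')))
    # [u'a', u'"b c"', u'd']
    print(get_valid_filename("john's portrait in 2004.jpg"))
    # johns_portrait_in_2004.jpg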
# Copyright Hybrid Logic Ltd. See LICENSE file for details. """ This module implements tools for exposing Python methods as API endpoints. """ from __future__ import absolute_import __all__ = [ "EndpointResponse", "structured", "user_documentation", ] from functools import wraps from json import loads, dumps from pyrsistent import PRecord, field, pvector from twisted.internet.defer import maybeDeferred from twisted.web.http import OK, INTERNAL_SERVER_ERROR from eliot import Logger, writeFailure, Action from eliot.twisted import DeferredContext from ._error import DECODING_ERROR, BadRequest, InvalidRequestJSON from ._logging import LOG_SYSTEM, REQUEST, JSON_REQUEST from ._schema import getValidator _ASCENDING = b"ascending" _DESCENDING = b"descending" _logger = Logger() class EndpointResponse(object): """ An endpoint can return an L{EndpointResponse} instance to return a custom response code to the client along with a successful response body. """ def __init__(self, code, result): """ @param code: The HTTP response code to set in the response. @type code: L{int} @param result: The (structured) value to put into the response body. This must be JSON encodeable. """ self.code = code self.result = result def _get_logger(self): """ Find the specific or default ``Logger``. :return: A ``Logger`` object. """ try: logger = self.logger except AttributeError: return _logger else: if logger is None: logger = _logger return logger def _logging(original): """ Decorate a method which implements an API endpoint to add Eliot-based logging. Calls to the decorated function will be in a L{REQUEST} action. If the decorated function raises an exception then the exception will be logged and a token which identifies that log event sent in the response. """ @wraps(original) def logger(self, request, **routeArguments): logger = _get_logger(self) # If this is ever more than ASCII we might have issues? or maybe # this is pre-url decoding? # https://clusterhq.atlassian.net/browse/FLOC-1602 action = REQUEST(logger, request_path=request.path, method=request.method) # Generate a serialized action context that uniquely identifies # position within the logs, though there won't actually be any log # message with that particular task level: incidentIdentifier = action.serialize_task_id() with action.context(): d = DeferredContext(original(self, request, **routeArguments)) def failure(reason): if reason.check(BadRequest): code = reason.value.code result = reason.value.result else: writeFailure(reason, logger, LOG_SYSTEM) code = INTERNAL_SERVER_ERROR result = incidentIdentifier request.setResponseCode(code) request.responseHeaders.setRawHeaders( b"content-type", [b"application/json"]) return dumps(result) d.addErrback(failure) d.addActionFinish() return d.result return logger def _remote_logging(original): """ Decorate a method which implements an API endpoint to do Eliot-based log tracing; that is, the ability to continue a remote task. The remote task's context will be extracted from a C{X-Eliot-Task-Id} HTTP header. :param original: Function to wrap. :return: Wrapped function. 
""" @wraps(original) def logger(self, request, **routeArguments): serialized_remote_task = request.requestHeaders.getRawHeaders( "X-Eliot-Task-Id", [None])[0] if serialized_remote_task is None: return original(self, request, **routeArguments) try: action = Action.continue_task(task_id=serialized_remote_task) except ValueError: return original(self, request, **routeArguments) with action.context(): d = DeferredContext(original(self, request, **routeArguments)) d.addActionFinish() return d.result return logger def _serialize(outputValidator): """ Decorate a function so that its return value is automatically JSON encoded into a structure indicating a successful result. @param outputValidator: A L{jsonschema} validator for the returned JSON. @return: A decorator that decorates a function with the signature of a Klein route endpoint that may return a Deferred. """ def deco(original): def success(result, request): code = OK if isinstance(result, EndpointResponse): code = result.code result = result.result outputValidator.validate(result) request.responseHeaders.setRawHeaders( b"content-type", [b"application/json"]) request.setResponseCode(code) return dumps(result) def doit(self, request, **routeArguments): result = maybeDeferred(original, self, request, **routeArguments) result.addCallback(success, request) return result return doit return deco def structured(inputSchema, outputSchema, schema_store=None, ignore_body=False): """ Decorate a Klein-style endpoint method so that the request body is automatically decoded and the response body is automatically encoded. Items in the object encoded in the request body will be passed to C{original} as keyword arguments. For example:: {"foo": "bar"} If this request body is received it will be as if the decorated function were called like:: original(foo="bar") The encoded form of the object returned by C{original} will define the response body. :param inputSchema: JSON Schema describing the request body. :param outputSchema: JSON Schema describing the response body. :param schema_store: A mapping between schema paths (e.g. ``b/v1/types.json``) and the JSON schema structure, allowing input/output schemas to just be references. :param ignore_body: If true, the body is not passed to the endpoint regardless of HTTP method, in particular including ``POST``. By default the body is only ignored for ``GET`` and ``HEAD``. """ if schema_store is None: schema_store = {} inputValidator = getValidator(inputSchema, schema_store) outputValidator = getValidator(outputSchema, schema_store) def deco(original): @wraps(original) @_remote_logging @_logging @_serialize(outputValidator) def loadAndDispatch(self, request, **routeArguments): if request.method in (b"GET", b"DELETE") or ignore_body: objects = {} else: body = request.content.read() try: objects = loads(body) except ValueError: raise DECODING_ERROR errors = [] for error in inputValidator.iter_errors(objects): errors.append(error.message) if errors: raise InvalidRequestJSON(errors=errors, schema=inputSchema) eliot_action = JSON_REQUEST(_get_logger(self), json=objects.copy()) with eliot_action.context(): # Just assume there are no conflicts between these collections # of arguments right now. When there is a schema for the JSON # hopefully we can do some static verification that no routing # arguments conflict with any top-level keys in the request # body and then we can be sure there are no conflicts here. 
                objects.update(routeArguments)

                d = DeferredContext(maybeDeferred(original, self, **objects))

                def got_result(result):
                    code = OK
                    json = result
                    if isinstance(result, EndpointResponse):
                        code = result.code
                        json = result.result
                    eliot_action.add_success_fields(code=code, json=json)
                    return result
                d.addCallback(got_result)
                d.addActionFinish()
                return d.result

        loadAndDispatch.inputSchema = inputSchema
        loadAndDispatch.outputSchema = outputSchema
        return loadAndDispatch
    return deco


class UserDocumentation(PRecord):
    """
    User-facing documentation attached to an endpoint by ``user_documentation``.
    """
    text = field(type=unicode, mandatory=True)
    header = field(type=unicode, mandatory=True)
    section = field(type=unicode, mandatory=True)
    examples = field(mandatory=True)


def user_documentation(text, header, section, examples=None):
    """
    Annotate a klein-style endpoint to include user-facing documentation.

    :param unicode text: The documentation to be included in the generated API
        documentation along with the decorated endpoint.

    :param unicode header: The header to be included in the generated API docs.

    :param unicode section: The section of the docs to include this route in.

    :param list examples: The identifiers of any examples demonstrating the use
        of this endpoint to include in the generated API documentation along
        with the decorated endpoint.
    """
    if examples is None:
        examples = []

    def deco(f):
        f.user_documentation = UserDocumentation(
            text=text, examples=pvector(examples),
            header=header, section=section)
        return f
    return deco


def private_api(f):
    """
    Annotate a klein-style endpoint to indicate it should not be included in
    user-facing API documentation.
    """
    f.private_api = True
    return f
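

# Hedged sketch (not taken from the real codebase) of how a Klein-style
# endpoint might use the structured() decorator above. The route, class name
# and schemas are illustrative; it assumes klein is installed and that a
# trivially permissive JSON schema is acceptable to getValidator().
from klein import Klein


class ExampleAPI(object):
    app = Klein()
    logger = None  # _get_logger() then falls back to the module-level Logger

    @app.route(b"/example/version", methods=[b"GET"])
    @structured(
        inputSchema={},                    # GET requests carry no body
        outputSchema={"type": "object"},   # permissive output schema
    )
    def version(self):
        # The return value is validated against outputSchema, JSON-encoded and
        # sent with a 200 response; an EndpointResponse could be returned
        # instead to pick a different status code.
        return {"example": "0.1.0"}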
# Copyright 2010 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing data providers for StringProperty.""" import random from soc.modules.seeder.logic.providers.provider import BaseDataProvider from soc.modules.seeder.logic.providers.provider import DataProviderParameter from soc.modules.seeder.logic.providers.provider import FixedValueProvider from soc.modules.seeder.logic.providers.provider import ParameterValueError class StringProvider(BaseDataProvider): """Base class for all data providers that return a string.""" class FixedStringProvider(StringProvider, FixedValueProvider): """Data provider that returns a fixed string.""" class FixedLengthAscendingNumericStringProvider(StringProvider): """Data provider that returns a fixed length ascending numeric string. This is useful to generate strings needing to be unique, e.g. link_id. """ def __init__(self, length=6, start=0): """Constructor. """ self.num = start self.length = length def getValue(self): """Generates the next value. """ value = self.normalize() self.num += 1 return value def normalize(self): """Transforms the num into a normalized string. """ string = str(self.num) string_length = len(string) if self.length > string_length: string = '0' * (self.length-len(string)) + string elif self.length < string_length: string = string[0:self.length] return string class LinkIDProvider(StringProvider): """Data provider that returns a string suitable for use as link_id. """ def __init__(self, model_class): self._model_class = model_class def getValue(self): q = self._model_class.all() q.order("-link_id") last = q.get() last_id = last.link_id[1:] if last else -1 start = int(last_id) + 1 link_id_provider = FixedLengthAscendingNumericStringProvider(start=start) return "m" + link_id_provider.getValue() class UniqueIDProvider(StringProvider): """Data provider that returns a unique identifier.""" counter = 0 def getValue(self): """See provider.BaseDataProvider.getValue for specification.""" # this class does not need to be thread safe as long as one unit test # is executed by a single thread; different unit tests may get the same # values, as datastore is cleared between the tests UniqueIDProvider.counter += 1 return 'm' + FixedLengthAscendingNumericStringProvider( start=UniqueIDProvider.counter).getValue() class NextLinkIDProvider(FixedLengthAscendingNumericStringProvider): """Data provider that returns a string suitable for use as link_id. """ def getValue(self): return "m" + super(NextLinkIDProvider, self).getValue() class KeyNameProvider(StringProvider): """Data proider that returns a key_name.""" # TODO(nathaniel): This is not legitimate polymorphism. Find a way # to eliminate this lint suppression. 
def getValue(self, values): # pylint: disable=arguments-differ key_name = values['link_id'] scope = values.get('scope', None) if scope: key_name = scope.key().name() + '/' + key_name return key_name class DocumentKeyNameProvider(KeyNameProvider): """Data proider that returns a key_name.""" def getValue(self, values): key_name = super(DocumentKeyNameProvider, self).getValue(values) prefix = values['prefix'] key_name = "%s/%s" % (prefix, key_name) return key_name class SurveyKeyNameProvider(KeyNameProvider): """Data proider that returns a key_name for Survey models. """ def getValue(self, values): key_name = super(SurveyKeyNameProvider, self).getValue(values) prefix = values['prefix'] key_name = "%s/%s" % (prefix, key_name) return key_name class RandomWordProvider(StringProvider): """Data provider that returns a random word. """ #TODO(sttwister): Find a list of real words, or make up some algorithm choices = ['dog', 'cat', 'animal', 'bat', 'chicken', 'bird', 'elephant', 'monkey', 'moose', 'zombie', 'spiderman', 'ghost', 'whale'] @classmethod def getParametersList(cls): parameters = super(RandomWordProvider, cls).getParametersList()[:] parameters += [ DataProviderParameter('choices', 'Choices', 'A comma separated list of word choices', False), DataProviderParameter('prefix', 'Prefix', 'A prefix to apply to the words.', False)] return parameters def getValue(self): prefix = self.param_values.get('prefix', '') if 'choices' in self.param_values: return prefix + random.choice(self.param_values['choices'].split(',')) else: return prefix + random.choice(self.choices) class RandomNameProvider(RandomWordProvider): """Data provider that returns a random name. """ choices = ["Adam", "John", "Steve"] def getValue(self): return ' '.join(super(RandomNameProvider, self).getValue() for _ in range(2)) class RandomPhraseProvider(StringProvider): """Data provider that returns a random phrase. """ DEFAULT_MIN_WORDS = 5 DEFAULT_MAX_WORDS = 15 @classmethod def getParametersList(cls): parameters = super(RandomPhraseProvider, cls).getParametersList()[:] parameters += [ DataProviderParameter('min_words', 'Minimum words', ('The minimum number of words to' ' include in the phrase')), DataProviderParameter('max_words', 'Maximum words', ('The maximum number of words to' ' include in the phrase')), ] return parameters def checkParameters(self): super(RandomPhraseProvider, self).checkParameters() try: minw = int(self.param_values.get('min_words', 1)) except ValueError: raise ParameterValueError('Value supplied for min_words is not integer') try: maxw = int(self.param_values.get('max_words', 1)) except ValueError: raise ParameterValueError('Value supplied for max_words is not integer') if minw <= 0: raise ParameterValueError('Value supplied for min_words must be positive') if maxw <= 0: raise ParameterValueError('Value supplied for max_words must be positive') def getValue(self): self.checkParameters() word_provider = RandomWordProvider() minw = int(self.param_values.get('min_words', self.DEFAULT_MIN_WORDS)) maxw = int(self.param_values.get('max_words', self.DEFAULT_MAX_WORDS)) words = random.randint(minw, maxw) phrase = ' '.join(word_provider.getValue() for i in range(words)) #@UnusedVariable phrase = phrase.capitalize() phrase += '.' return phrase
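

# Quick illustration (not part of the original module) of
# FixedLengthAscendingNumericStringProvider; the class is self-contained, so
# no other seeder infrastructure is needed to run this.
if __name__ == '__main__':
    provider = FixedLengthAscendingNumericStringProvider(length=4, start=8)
    print(provider.getValue())  # '0008'
    print(provider.getValue())  # '0009'
    print(provider.getValue())  # '0010'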
""" Sikuli script implementation for Python See https://github.com/glitchassassin/lackey """ from zipfile import ZipFile try: import Tkinter as tk import tkFileDialog import tkMessageBox except ImportError: import tkinter as tk from tkinter import filedialog as tkFileDialog from tkinter import messagebox as tkMessageBox import platform import keyboard try: import thread except ImportError: import _thread as thread import sys import time import os import warnings import requests ## Lackey sub-files #from .PlatformManagerWindows import PlatformManagerWindows from .KeyCodes import Button, Key, KeyModifier from .RegionMatching import Pattern, Region, Match, Screen, ObserveEvent, PlatformManager, FOREVER from .Geometry import Location from .InputEmulation import Mouse, Keyboard from .App import App from .Exceptions import FindFailed, ImageMissing from .SettingsDebug import Debug, Settings, DebugMaster, SettingsMaster from .SikuliGui import PopupInput, PopupList, PopupTextarea from ._version import __version__ from . import ImportHandler VALID_PLATFORMS = ["Windows", "Darwin"] ## Define script abort hotkey (Alt+Shift+C) def _abort_script(): thread.interrupt_main() # If we are not on Unix, or if we are on Unix and have root privileges, start the # alt+shift+c hotkey listener to abort the script if (not hasattr(os, "geteuid") or os.geteuid() == 0): keyboard.add_hotkey("alt+shift+c", _abort_script, suppress=True) print("Use Alt+Shift+C to abort script manually") else: print("No root privileges: Unable to set Alt+Shift+C listener to abort script.") ## Sikuli patching: Functions that map to the global Screen region ## Don't try this at home, kids! # First, save the native functions by remapping them with a trailing underscore: type_ = type input_ = input exit_ = sys.exit #zip_ = zip # Deprecated underscore functions def _exit(code): warnings.warn("Please use exit_ instead.", DeprecationWarning) return exit_(code) def _input(prompt): warnings.warn("Please use input_ instead.", DeprecationWarning) return input_(prompt) def _type(obj): warnings.warn("Please use type_ instead.", DeprecationWarning) return type_(obj) ## Sikuli Convenience Functions def sleep(seconds): """ Convenience function. Pauses script for `seconds`. """ time.sleep(seconds) def exit(value): """ Convenience function. Exits with code `value`. """ sys.exit(value) def setShowActions(value): """ Convenience function. Sets "show actions" setting (True or False) """ Settings.ShowActions = bool(value) def getBundlePath(): """ Convenience function. Returns the path of the \\*.sikuli bundle. """ return Settings.BundlePath def getBundleFolder(): """ Convenience function. Same as `getBundlePath()` plus the OS default path separator. """ return getBundlePath() + os.path.sep def setBundlePath(path): """ Convenience function. Changes the path of the \\*.sikuli bundle. """ if os.path.exists(path): Settings.BundlePath = path else: raise OSError("File not found: " + path) def getImagePath(): """ Convenience function. Returns a list of paths to search for images. """ return [getBundlePath()] + Settings.ImagePaths def addImagePath(new_path): """ Convenience function. Adds a path to the list of paths to search for images. Can be a URL (but must be accessible). 
""" if os.path.exists(new_path): Settings.ImagePaths.append(new_path) elif "http://" in new_path or "https://" in new_path: request = requests.get(new_path) if request.status_code < 400: # Path exists Settings.ImagePaths.append(new_path) else: raise OSError("Unable to connect to " + new_path) else: raise OSError("File not found: " + new_path) def addHTTPImagePath(new_path): """ Convenience function. Same as `addImagePath()`. """ addImagePath(new_path) def getParentPath(): """ Convenience function. Returns the parent folder of the \\*.sikuli bundle. """ return os.path.dirname(Settings.BundlePath) def getParentFolder(): """ Convenience function. Same as `getParentPath()` plus the OS default path separator. """ return getParentPath() + os.path.sep def makePath(*args): """ Convenience function. Returns a path from a series of path components. Same as `os.path.join`. """ return os.path.join(*args) def makeFolder(*args): """ Convenience function. Same as `makePath()` plus the OS default path separator. """ return makePath(*args) + os.path.sep ## Sikuli implements the unzip() file, below. Included here to avoid breaking old ## scripts. ``zipfile()`` is coded here, but not included in Sikuli, so I've ## commented it out for the time being. Note that ``zip`` is a reserved keyword ## in Python. def unzip(from_file, to_folder): """ Convenience function. Extracts files from the zip file `fromFile` into the folder `toFolder`. """ with ZipFile(os.path.abspath(from_file), 'r') as to_unzip: to_unzip.extractall(os.path.abspath(to_folder)) #def zipfile(fromFolder, toFile): # with ZipFile(toFile, 'w') as to_zip: # for root, dirs, files in os.walk(fromFolder): # for file in files: # to_zip.write(os.path.join(root, file)) ## Popup/input dialogs def popat(*args): """ Convenience function. Sets the popup location (currently not used). """ if len(args) == 2 and isinstance(args[0], int) and isinstance(args[1], int): # popat(x,y) Settings.PopupLocation = Location(args[0], args[1]) elif len(args) == 1 and isinstance(args[0], Location): # popat(location) Settings.PopupLocation = args[0] elif len(args) == 1 and isinstance(args[0], Region): Settings.PopupLocation = args[0].getCenter() elif len(args) == 0: Settings.PopupLocation = SCREEN.getCenter() else: raise TypeError("Unrecognized parameter(s) for popat") def popup(text, title="Lackey Info"): """ Creates an info dialog with the specified text. """ root = tk.Tk() root.withdraw() tkMessageBox.showinfo(title, text) def popError(text, title="Lackey Error"): """ Creates an error dialog with the specified text. """ root = tk.Tk() root.withdraw() tkMessageBox.showerror(title, text) def popAsk(text, title="Lackey Decision"): """ Creates a yes-no dialog with the specified text. """ root = tk.Tk() root.withdraw() return tkMessageBox.askyesno(title, text) # Be aware this overwrites the Python input() command-line function. def input(msg="", default="", title="Lackey Input", hidden=False): """ Creates an input dialog with the specified message and default text. If `hidden`, creates a password dialog instead. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(default) PopupInput(root, msg, title, hidden, input_text) root.focus_force() root.mainloop() return str(input_text.get()) def inputText(message="", title="Lackey Input", lines=9, width=20, text=""): """ Creates a textarea dialog with the specified message and default text. Returns the entered value. 
""" root = tk.Tk() input_text = tk.StringVar() input_text.set(text) PopupTextarea(root, message, title, lines, width, input_text) root.focus_force() root.mainloop() return str(input_text.get()) def select(message="", title="Lackey Input", options=None, default=None): """ Creates a dropdown selection dialog with the specified message and options `default` must be one of the options. Returns the selected value. """ if options is None or len(options) == 0: return "" if default is None: default = options[0] if default not in options: raise ValueError("<<default>> not in options[]") root = tk.Tk() input_text = tk.StringVar() input_text.set(message) PopupList(root, message, title, options, default, input_text) root.focus_force() root.mainloop() return str(input_text.get()) def popFile(title="Lackey Open File"): """ Creates a file selection dialog with the specified message and options. Returns the selected file. """ root = tk.Tk() root.withdraw() return str(tkFileDialog.askopenfilename(title=title)) # If this is a valid platform, set up initial Screen object. Otherwise, might be ReadTheDocs if platform.system() in VALID_PLATFORMS: SCREEN = Screen(0) for prop in dir(SCREEN): if callable(getattr(SCREEN, prop, None)) and prop[0] != "_": # Property is a method, and is not private. Dump it into the global namespace. globals()[prop] = getattr(SCREEN, prop, None)
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Miscellaneous node types. """ import os.path import re import sys from grit import constants from grit import exception from grit import util import grit.format.rc_header from grit.node import base from grit.node import io from grit.node import message # RTL languages # TODO(jennyz): remove this fixed set of RTL language array # now that generic expand_variable code exists. _RTL_LANGS = ( 'ar', # Arabic 'fa', # Farsi 'iw', # Hebrew 'ks', # Kashmiri 'ku', # Kurdish 'ps', # Pashto 'ur', # Urdu 'yi', # Yiddish ) def _ReadFirstIdsFromFile(filename, defines): """Read the starting resource id values from |filename|. We also expand variables of the form <(FOO) based on defines passed in on the command line. Returns a tuple, the absolute path of SRCDIR followed by the first_ids dictionary. """ first_ids_dict = eval(util.ReadFile(filename, util.RAW_TEXT)) src_root_dir = os.path.abspath(os.path.join(os.path.dirname(filename), first_ids_dict['SRCDIR'])) def ReplaceVariable(matchobj): for key, value in defines.iteritems(): if matchobj.group(1) == key: value = os.path.abspath(value)[len(src_root_dir) + 1:] return value return '' renames = [] for grd_filename in first_ids_dict: new_grd_filename = re.sub(r'<\(([A-Za-z_]+)\)', ReplaceVariable, grd_filename) if new_grd_filename != grd_filename: new_grd_filename = new_grd_filename.replace('\\', '/') renames.append((grd_filename, new_grd_filename)) for grd_filename, new_grd_filename in renames: first_ids_dict[new_grd_filename] = first_ids_dict[grd_filename] del(first_ids_dict[grd_filename]) return (src_root_dir, first_ids_dict) class SplicingNode(base.Node): """A node whose children should be considered to be at the same level as its siblings for most purposes. This includes <if> and <part> nodes. """ def _IsValidChild(self, child): assert self.parent, '<%s> node should never be root.' % self.name if isinstance(child, SplicingNode): return True # avoid O(n^2) behavior return self.parent._IsValidChild(child) class IfNode(SplicingNode): """A node for conditional inclusion of resources. """ def MandatoryAttributes(self): return ['expr'] def _IsValidChild(self, child): return (isinstance(child, (ThenNode, ElseNode)) or super(IfNode, self)._IsValidChild(child)) def EndParsing(self): children = self.children self.if_then_else = False if any(isinstance(node, (ThenNode, ElseNode)) for node in children): if (len(children) != 2 or not isinstance(children[0], ThenNode) or not isinstance(children[1], ElseNode)): raise exception.UnexpectedChild( '<if> element must be <if><then>...</then><else>...</else></if>') self.if_then_else = True def ActiveChildren(self): cond = self.EvaluateCondition(self.attrs['expr']) if self.if_then_else: return self.children[0 if cond else 1].ActiveChildren() else: # Equivalent to having all children inside <then> with an empty <else> return super(IfNode, self).ActiveChildren() if cond else [] class ThenNode(SplicingNode): """A <then> node. Can only appear directly inside an <if> node.""" pass class ElseNode(SplicingNode): """An <else> node. Can only appear directly inside an <if> node.""" pass class PartNode(SplicingNode): """A node for inclusion of sub-grd (*.grp) files. 
""" def __init__(self): super(PartNode, self).__init__() self.started_inclusion = False def MandatoryAttributes(self): return ['file'] def _IsValidChild(self, child): return self.started_inclusion and super(PartNode, self)._IsValidChild(child) class ReleaseNode(base.Node): """The <release> element.""" def _IsValidChild(self, child): from grit.node import empty return isinstance(child, (empty.IncludesNode, empty.MessagesNode, empty.StructuresNode, empty.IdentifiersNode)) def _IsValidAttribute(self, name, value): return ( (name == 'seq' and int(value) <= self.GetRoot().GetCurrentRelease()) or name == 'allow_pseudo' ) def MandatoryAttributes(self): return ['seq'] def DefaultAttributes(self): return { 'allow_pseudo' : 'true' } def GetReleaseNumber(): """Returns the sequence number of this release.""" return self.attribs['seq'] class GritNode(base.Node): """The <grit> root element.""" def __init__(self): super(GritNode, self).__init__() self.output_language = '' self.defines = {} self.substituter = None self.target_platform = sys.platform def _IsValidChild(self, child): from grit.node import empty return isinstance(child, (ReleaseNode, empty.TranslationsNode, empty.OutputsNode)) def _IsValidAttribute(self, name, value): if name not in ['base_dir', 'first_ids_file', 'source_lang_id', 'latest_public_release', 'current_release', 'enc_check', 'tc_project', 'grit_version', 'output_all_resource_defines']: return False if name in ['latest_public_release', 'current_release'] and value.strip( '0123456789') != '': return False return True def MandatoryAttributes(self): return ['latest_public_release', 'current_release'] def DefaultAttributes(self): return { 'base_dir' : '.', 'first_ids_file': '', 'grit_version': 1, 'source_lang_id' : 'en', 'enc_check' : constants.ENCODING_CHECK, 'tc_project' : 'NEED_TO_SET_tc_project_ATTRIBUTE', 'output_all_resource_defines': 'true' } def EndParsing(self): super(GritNode, self).EndParsing() if (int(self.attrs['latest_public_release']) > int(self.attrs['current_release'])): raise exception.Parsing('latest_public_release cannot have a greater ' 'value than current_release') self.ValidateUniqueIds() # Add the encoding check if it's not present (should ensure that it's always # present in all .grd files generated by GRIT). If it's present, assert if # it's not correct. if 'enc_check' not in self.attrs or self.attrs['enc_check'] == '': self.attrs['enc_check'] = constants.ENCODING_CHECK else: assert self.attrs['enc_check'] == constants.ENCODING_CHECK, ( 'Are you sure your .grd file is in the correct encoding (UTF-8)?') def ValidateUniqueIds(self): """Validate that 'name' attribute is unique in all nodes in this tree except for nodes that are children of <if> nodes. """ unique_names = {} duplicate_names = [] # To avoid false positives from mutually exclusive <if> clauses, check # against whatever the output condition happens to be right now. # TODO(benrg): do something better. 
for node in self.ActiveDescendants(): if node.attrs.get('generateid', 'true') == 'false': continue # Duplication not relevant in that case for node_id in node.GetTextualIds(): if util.SYSTEM_IDENTIFIERS.match(node_id): continue # predefined IDs are sometimes used more than once if node_id in unique_names and node_id not in duplicate_names: duplicate_names.append(node_id) unique_names[node_id] = 1 if len(duplicate_names): raise exception.DuplicateKey(', '.join(duplicate_names)) def GetCurrentRelease(self): """Returns the current release number.""" return int(self.attrs['current_release']) def GetLatestPublicRelease(self): """Returns the latest public release number.""" return int(self.attrs['latest_public_release']) def GetSourceLanguage(self): """Returns the language code of the source language.""" return self.attrs['source_lang_id'] def GetTcProject(self): """Returns the name of this project in the TranslationConsole, or 'NEED_TO_SET_tc_project_ATTRIBUTE' if it is not defined.""" return self.attrs['tc_project'] def SetOwnDir(self, dir): """Informs the 'grit' element of the directory the file it is in resides. This allows it to calculate relative paths from the input file, which is what we desire (rather than from the current path). Args: dir: r'c:\bla' Return: None """ assert dir self.base_dir = os.path.normpath(os.path.join(dir, self.attrs['base_dir'])) def GetBaseDir(self): """Returns the base directory, relative to the working directory. To get the base directory as set in the .grd file, use GetOriginalBaseDir() """ if hasattr(self, 'base_dir'): return self.base_dir else: return self.GetOriginalBaseDir() def GetOriginalBaseDir(self): """Returns the base directory, as set in the .grd file. """ return self.attrs['base_dir'] def ShouldOutputAllResourceDefines(self): """Returns true if all resource defines should be output, false if defines for resources not emitted to resource files should be skipped. """ return self.attrs['output_all_resource_defines'] == 'true' def GetInputFiles(self): """Returns the list of files that are read to produce the output.""" # Importing this here avoids a circular dependency in the imports. # pylint: disable-msg=C6204 from grit.node import include from grit.node import misc from grit.node import structure from grit.node import variant # Check if the input is required for any output configuration. input_files = set() old_output_language = self.output_language for lang, ctx in self.GetConfigurations(): self.SetOutputLanguage(lang or self.GetSourceLanguage()) self.SetOutputContext(ctx) for node in self.ActiveDescendants(): if isinstance(node, (io.FileNode, include.IncludeNode, misc.PartNode, structure.StructureNode, variant.SkeletonNode)): input_files.add(node.GetInputPath()) self.SetOutputLanguage(old_output_language) return sorted(map(self.ToRealPath, input_files)) def GetFirstIdsFile(self): """Returns a usable path to the first_ids file, if set, otherwise returns None. The first_ids_file attribute is by default relative to the base_dir of the .grd file, but may be prefixed by GRIT_DIR/, which makes it relative to the directory of grit.py (e.g. GRIT_DIR/../gritsettings/resource_ids). 
""" if not self.attrs['first_ids_file']: return None path = self.attrs['first_ids_file'] GRIT_DIR_PREFIX = 'GRIT_DIR' if (path.startswith(GRIT_DIR_PREFIX) and path[len(GRIT_DIR_PREFIX)] in ['/', '\\']): return util.PathFromRoot(path[len(GRIT_DIR_PREFIX) + 1:]) else: return self.ToRealPath(path) def GetOutputFiles(self): """Returns the list of <output> nodes that are descendants of this node's <outputs> child and are not enclosed by unsatisfied <if> conditionals. """ for child in self.children: if child.name == 'outputs': return [node for node in child.ActiveDescendants() if node.name == 'output'] raise exception.MissingElement() def GetConfigurations(self): """Returns the distinct (language, context) pairs from the output nodes. """ return set((n.GetLanguage(), n.GetContext()) for n in self.GetOutputFiles()) def GetSubstitutionMessages(self): """Returns the list of <message sub_variable="true"> nodes.""" return [n for n in self.ActiveDescendants() if isinstance(n, message.MessageNode) and n.attrs['sub_variable'] == 'true'] def SetOutputLanguage(self, output_language): """Set the output language. Prepares substitutions. The substitutions are reset every time the language is changed. They include messages designated as variables, and language codes for html and rc files. Args: output_language: a two-letter language code (eg: 'en', 'ar'...) or '' """ if not output_language: # We do not specify the output language for .grh files, # so we get an empty string as the default. # The value should match grit.clique.MessageClique.source_language. output_language = self.GetSourceLanguage() if output_language != self.output_language: self.output_language = output_language self.substituter = None # force recalculate def SetOutputContext(self, output_context): self.output_context = output_context self.substituter = None # force recalculate def SetDefines(self, defines): self.defines = defines self.substituter = None # force recalculate def SetTargetPlatform(self, target_platform): self.target_platform = target_platform def GetSubstituter(self): if self.substituter is None: self.substituter = util.Substituter() self.substituter.AddMessages(self.GetSubstitutionMessages(), self.output_language) if self.output_language in _RTL_LANGS: direction = 'dir="RTL"' else: direction = 'dir="LTR"' self.substituter.AddSubstitutions({ 'GRITLANGCODE': self.output_language, 'GRITDIR': direction, }) from grit.format import rc # avoid circular dep rc.RcSubstitutions(self.substituter, self.output_language) return self.substituter def AssignFirstIds(self, filename_or_stream, defines): """Assign first ids to each grouping node based on values from the first_ids file (if specified on the <grit> node). """ # If the input is a stream, then we're probably in a unit test and # should skip this step. if type(filename_or_stream) not in (str, unicode): return # Nothing to do if the first_ids_filename attribute isn't set. 
first_ids_filename = self.GetFirstIdsFile() if not first_ids_filename: return src_root_dir, first_ids = _ReadFirstIdsFromFile(first_ids_filename, defines) from grit.node import empty for node in self.Preorder(): if isinstance(node, empty.GroupingNode): abs_filename = os.path.abspath(filename_or_stream) if abs_filename[:len(src_root_dir)] != src_root_dir: filename = os.path.basename(filename_or_stream) else: filename = abs_filename[len(src_root_dir) + 1:] filename = filename.replace('\\', '/') if node.attrs['first_id'] != '': raise Exception( "Don't set the first_id attribute when using the first_ids_file " "attribute on the <grit> node, update %s instead." % first_ids_filename) try: id_list = first_ids[filename][node.name] except KeyError, e: print '-' * 78 print 'Resource id not set for %s (%s)!' % (filename, node.name) print ('Please update %s to include an entry for %s. See the ' 'comments in resource_ids for information on why you need to ' 'update that file.' % (first_ids_filename, filename)) print '-' * 78 raise e try: node.attrs['first_id'] = str(id_list.pop(0)) except IndexError, e: raise Exception('Please update %s and add a first id for %s (%s).' % (first_ids_filename, filename, node.name)) def RunGatherers(self, debug=False): '''Call RunPreSubstitutionGatherer() on every node of the tree, then apply substitutions, then call RunPostSubstitutionGatherer() on every node. The substitutions step requires that the output language has been set. Locally, get the Substitution messages and add them to the substituter. Also add substitutions for language codes in the Rc. Args: debug: will print information while running gatherers. ''' for node in self.ActiveDescendants(): if hasattr(node, 'RunPreSubstitutionGatherer'): with node: node.RunPreSubstitutionGatherer(debug=debug) assert self.output_language self.SubstituteMessages(self.GetSubstituter()) for node in self.ActiveDescendants(): if hasattr(node, 'RunPostSubstitutionGatherer'): with node: node.RunPostSubstitutionGatherer(debug=debug) class IdentifierNode(base.Node): """A node for specifying identifiers that should appear in the resource header file, and be unique amongst all other resource identifiers, but don't have any other attributes or reference any resources. """ def MandatoryAttributes(self): return ['name'] def DefaultAttributes(self): return { 'comment' : '', 'id' : '', 'systemid': 'false' } def GetId(self): """Returns the id of this identifier if it has one, None otherwise """ if 'id' in self.attrs: return self.attrs['id'] return None def EndParsing(self): """Handles system identifiers.""" super(IdentifierNode, self).EndParsing() if self.attrs['systemid'] == 'true': util.SetupSystemIdentifiers((self.attrs['name'],)) @staticmethod def Construct(parent, name, id, comment, systemid='false'): """Creates a new node which is a child of 'parent', with attributes set by parameters of the same name. """ node = IdentifierNode() node.StartParsing('identifier', parent) node.HandleAttribute('name', name) node.HandleAttribute('id', id) node.HandleAttribute('comment', comment) node.HandleAttribute('systemid', systemid) node.EndParsing() return node
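

# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the kind of <(VAR)
# expansion that _ReadFirstIdsFromFile() applies to the keys of the first_ids
# file.  The defines dict and paths below are made-up examples, and the
# replacement callback is a simplified stand-in for ReplaceVariable() (no
# abspath/SRCDIR trimming).
def _example_expand_grd_variables():
  import re

  defines = {'SHARED_INTERMEDIATE_DIR': 'out/gen'}   # hypothetical define

  def replace_variable(matchobj):
    # Look the matched variable name up in the defines passed on the
    # command line; unknown variables expand to the empty string.
    return defines.get(matchobj.group(1), '')

  grd_filename = '<(SHARED_INTERMEDIATE_DIR)/devtools/devtools_resources.grd'
  expanded = re.sub(r'<\(([A-Za-z_]+)\)', replace_variable, grd_filename)
  assert expanded == 'out/gen/devtools/devtools_resources.grd'
  return expanded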
#!/usr/bin/env python from nose.tools import * import networkx from test_graph import BaseAttrGraphTester, TestGraph class BaseMultiGraphTester(BaseAttrGraphTester): def test_has_edge(self): G=self.K3 assert_equal(G.has_edge(0,1),True) assert_equal(G.has_edge(0,-1),False) assert_equal(G.has_edge(0,1,0),True) assert_equal(G.has_edge(0,1,1),False) def test_get_edge_data(self): G=self.K3 assert_equal(G.get_edge_data(0,1),{0:{}}) assert_equal(G[0][1],{0:{}}) assert_equal(G[0][1][0],{}) assert_equal(G.get_edge_data(10,20),None) assert_equal(G.get_edge_data(0,1,0),{}) def test_adjacency_iter(self): G=self.K3 assert_equal(dict(G.adjacency_iter()), {0: {1: {0:{}}, 2: {0:{}}}, 1: {0: {0:{}}, 2: {0:{}}}, 2: {0: {0:{}}, 1: {0:{}}}}) def deepcopy_edge_attr(self,H,G): assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo']) G[1][2][0]['foo'].append(1) assert_not_equal(G[1][2][0]['foo'],H[1][2][0]['foo']) def shallow_copy_edge_attr(self,H,G): assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo']) G[1][2][0]['foo'].append(1) assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo']) def same_attrdict(self, H, G): # same attrdict in the edgedata old_foo=H[1][2][0]['foo'] H.add_edge(1,2,0,foo='baz') assert_equal(G.edge,H.edge) H.add_edge(1,2,0,foo=old_foo) assert_equal(G.edge,H.edge) # but not same edgedata dict H.add_edge(1,2,foo='baz') assert_not_equal(G.edge,H.edge) old_foo=H.node[0]['foo'] H.node[0]['foo']='baz' assert_equal(G.node,H.node) H.node[0]['foo']=old_foo assert_equal(G.node,H.node) def different_attrdict(self, H, G): # used by graph_equal_but_different old_foo=H[1][2][0]['foo'] H.add_edge(1,2,0,foo='baz') assert_not_equal(G.edge,H.edge) H.add_edge(1,2,0,foo=old_foo) assert_equal(G.edge,H.edge) HH=H.copy() H.add_edge(1,2,foo='baz') assert_not_equal(G.edge,H.edge) H=HH old_foo=H.node[0]['foo'] H.node[0]['foo']='baz' assert_not_equal(G.node,H.node) H.node[0]['foo']=old_foo assert_equal(G.node,H.node) def test_to_undirected(self): G=self.K3 self.add_attributes(G) H=networkx.MultiGraph(G) self.is_shallow_copy(H,G) H=G.to_undirected() self.is_deepcopy(H,G) def test_to_directed(self): G=self.K3 self.add_attributes(G) H=networkx.MultiDiGraph(G) self.is_shallow_copy(H,G) H=G.to_directed() self.is_deepcopy(H,G) def test_selfloops(self): G=self.K3 G.add_edge(0,0) assert_equal(G.nodes_with_selfloops(),[0]) assert_equal(G.selfloop_edges(),[(0,0)]) assert_equal(G.selfloop_edges(data=True),[(0,0,{})]) assert_equal(G.number_of_selfloops(),1) def test_selfloops2(self): G=self.K3 G.add_edge(0,0) G.add_edge(0,0) G.add_edge(0,0,key='parallel edge') G.remove_edge(0,0,key='parallel edge') assert_equal(G.number_of_edges(0,0),2) G.remove_edge(0,0) assert_equal(G.number_of_edges(0,0),1) def test_edge_attr4(self): G=self.Graph() G.add_edge(1,2,key=0,data=7,spam='bar',bar='foo') assert_equal(G.edges(data=True), [(1,2,{'data':7,'spam':'bar','bar':'foo'})]) G[1][2][0]['data']=10 # OK to set data like this assert_equal(G.edges(data=True), [(1,2,{'data':10,'spam':'bar','bar':'foo'})]) G.edge[1][2][0]['data']=20 # another spelling, "edge" assert_equal(G.edges(data=True), [(1,2,{'data':20,'spam':'bar','bar':'foo'})]) G.edge[1][2][0]['listdata']=[20,200] G.edge[1][2][0]['weight']=20 assert_equal(G.edges(data=True), [(1,2,{'data':20,'spam':'bar', 'bar':'foo','listdata':[20,200],'weight':20})]) class TestMultiGraph(BaseMultiGraphTester,TestGraph): def setUp(self): self.Graph=networkx.MultiGraph # build K3 ed1,ed2,ed3 = ({0:{}},{0:{}},{0:{}}) self.k3adj={0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} self.k3edges=[(0, 1), (0, 2), (1, 
2)] self.k3nodes=[0, 1, 2] self.K3=self.Graph() self.K3.adj = self.K3.edge = self.k3adj self.K3.node={} self.K3.node[0]={} self.K3.node[1]={} self.K3.node[2]={} def test_data_input(self): G=self.Graph(data={1:[2],2:[1]}, name="test") assert_equal(G.name,"test") assert_equal(sorted(G.adj.items()),[(1, {2: {0:{}}}), (2, {1: {0:{}}})]) def test_getitem(self): G=self.K3 assert_equal(G[0],{1: {0:{}}, 2: {0:{}}}) assert_raises(KeyError, G.__getitem__, 'j') assert_raises((TypeError,networkx.NetworkXError), G.__getitem__, ['A']) def test_remove_node(self): G=self.K3 G.remove_node(0) assert_equal(G.adj,{1:{2:{0:{}}},2:{1:{0:{}}}}) assert_raises((KeyError,networkx.NetworkXError), G.remove_node,-1) def test_add_edge(self): G=self.Graph() G.add_edge(0,1) assert_equal(G.adj,{0: {1: {0:{}}}, 1: {0: {0:{}}}}) G=self.Graph() G.add_edge(*(0,1)) assert_equal(G.adj,{0: {1: {0:{}}}, 1: {0: {0:{}}}}) def test_add_edge_conflicting_key(self): G=self.Graph() G.add_edge(0,1,key=1) G.add_edge(0,1) assert_equal(G.number_of_edges(),2) G=self.Graph() G.add_edges_from([(0,1,1,{})]) G.add_edges_from([(0,1)]) assert_equal(G.number_of_edges(),2) def test_add_edges_from(self): G=self.Graph() G.add_edges_from([(0,1),(0,1,{'weight':3})]) assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3}}}, 1: {0: {0:{},1:{'weight':3}}}}) G.add_edges_from([(0,1),(0,1,{'weight':3})],weight=2) assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3}, 2:{'weight':2},3:{'weight':3}}}, 1: {0: {0:{},1:{'weight':3}, 2:{'weight':2},3:{'weight':3}}}}) # too few in tuple assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too many in tuple assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3,4)]) assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple def test_remove_edge(self): G=self.K3 G.remove_edge(0,1) assert_equal(G.adj,{0: {2: {0: {}}}, 1: {2: {0: {}}}, 2: {0: {0: {}}, 1: {0: {}}}}) assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0) assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,0,2, key=1) def test_remove_edges_from(self): G=self.K3.copy() G.remove_edges_from([(0,1)]) assert_equal(G.adj,{0:{2:{0:{}}},1:{2:{0:{}}},2:{0:{0:{}},1:{0:{}}}}) G.remove_edges_from([(0,0)]) # silent fail self.K3.add_edge(0,1) G=self.K3.copy() G.remove_edges_from(G.edges(data=True,keys=True)) assert_equal(G.adj,{0:{},1:{},2:{}}) G=self.K3.copy() G.remove_edges_from(G.edges(data=False,keys=True)) assert_equal(G.adj,{0:{},1:{},2:{}}) G=self.K3.copy() G.remove_edges_from(G.edges(data=False,keys=False)) assert_equal(G.adj,{0:{},1:{},2:{}}) G=self.K3.copy() G.remove_edges_from([(0,1,0),(0,2,0,{}),(1,2)]) assert_equal(G.adj,{0:{1:{1:{}}},1:{0:{1:{}}},2:{}}) def test_remove_multiedge(self): G=self.K3 G.add_edge(0,1,key='parallel edge') G.remove_edge(0,1,key='parallel edge') assert_equal(G.adj,{0: {1: {0:{}}, 2: {0:{}}}, 1: {0: {0:{}}, 2: {0:{}}}, 2: {0: {0:{}}, 1: {0:{}}}}) G.remove_edge(0,1) assert_equal(G.adj,{0:{2:{0:{}}},1:{2:{0:{}}},2:{0:{0:{}},1:{0:{}}}}) assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
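

# Illustrative example (not part of the original suite): parallel edges in a
# MultiGraph get distinct keys (0, 1, ... unless a key is given explicitly),
# which is why the expected adjacency dicts above nest one extra key level
# below each neighbour.
def example_parallel_edge_keys():
    G = networkx.MultiGraph()
    G.add_edge(0, 1)                  # implicit key 0
    G.add_edge(0, 1)                  # implicit key 1
    G.add_edge(0, 1, key='third')     # explicit key
    assert_equal(G.number_of_edges(0, 1), 3)
    assert_equal(sorted(G[0][1].keys(), key=str), [0, 1, 'third'])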
from __future__ import unicode_literals import base64 import botocore.client import boto3 import hashlib import io import json import zipfile import sure # noqa from freezegun import freeze_time from moto import mock_lambda, mock_s3, mock_ec2, settings def _process_lamda(pfunc): zip_output = io.BytesIO() zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) zip_file.writestr('lambda_function.zip', pfunc) zip_file.close() zip_output.seek(0) return zip_output.read() def get_test_zip_file1(): pfunc = """ def lambda_handler(event, context): return event """ return _process_lamda(pfunc) def get_test_zip_file2(): pfunc = """ def lambda_handler(event, context): volume_id = event.get('volume_id') print('get volume details for %s' % volume_id) import boto3 ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url="http://{base_url}") vol = ec2.Volume(volume_id) print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, vol.size)) return event """.format(base_url="localhost:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") return _process_lamda(pfunc) @mock_lambda def test_list_functions(): conn = boto3.client('lambda', 'us-west-2') result = conn.list_functions() result['Functions'].should.have.length_of(0) @mock_lambda def test_invoke_requestresponse_function(): conn = boto3.client('lambda', 'us-west-2') conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'ZipFile': get_test_zip_file1(), }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) in_data = {'msg': 'So long and thanks for all the fish'} success_result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) base64.b64decode(success_result["LogResult"]).decode( 'utf-8').should.equal(json.dumps(in_data)) json.loads(success_result["Payload"].read().decode( 'utf-8')).should.equal(in_data) @mock_lambda def test_invoke_event_function(): conn = boto3.client('lambda', 'us-west-2') conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'ZipFile': get_test_zip_file1(), }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) conn.invoke.when.called_with( FunctionName='notAFunction', InvocationType='Event', Payload='{}' ).should.throw(botocore.client.ClientError) in_data = {'msg': 'So long and thanks for all the fish'} success_result = conn.invoke( FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) json.loads(success_result['Payload'].read().decode( 'utf-8')).should.equal({}) @mock_ec2 @mock_lambda def test_invoke_function_get_ec2_volume(): conn = boto3.resource("ec2", "us-west-2") vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2') vol = conn.Volume(vol.id) conn = boto3.client('lambda', 'us-west-2') conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'ZipFile': get_test_zip_file2(), }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) in_data = {'volume_id': vol.id} result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', Payload=json.dumps(in_data)) result["StatusCode"].should.equal(202) msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( vol.id, 
vol.id, vol.state, vol.size, json.dumps(in_data)) base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg) result['Payload'].read().decode('utf-8').should.equal(msg) @mock_lambda def test_create_based_on_s3_with_missing_bucket(): conn = boto3.client('lambda', 'us-west-2') conn.create_function.when.called_with( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'S3Bucket': 'this-bucket-does-not-exist', 'S3Key': 'test.zip', }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, VpcConfig={ "SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"], }, ).should.throw(botocore.client.ClientError) @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') def test_create_function_from_aws_bucket(): s3_conn = boto3.client('s3', 'us-west-2') s3_conn.create_bucket(Bucket='test-bucket') zip_content = get_test_zip_file2() s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) conn = boto3.client('lambda', 'us-west-2') result = conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, VpcConfig={ "SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"], }, ) # this is hard to match against, so remove it result['ResponseMetadata'].pop('HTTPHeaders', None) # Botocore inserts retry attempts not seen in Python27 result['ResponseMetadata'].pop('RetryAttempts', None) result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', 'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction', 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.handler', "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, 'Version': '$LATEST', 'VpcConfig': { "SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"], "VpcId": "vpc-123abc" }, 'ResponseMetadata': {'HTTPStatusCode': 201}, }) @mock_lambda @freeze_time('2015-01-01 00:00:00') def test_create_function_from_zipfile(): conn = boto3.client('lambda', 'us-west-2') zip_content = get_test_zip_file1() result = conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'ZipFile': zip_content, }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) # this is hard to match against, so remove it result['ResponseMetadata'].pop('HTTPHeaders', None) # Botocore inserts retry attempts not seen in Python27 result['ResponseMetadata'].pop('RetryAttempts', None) result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', 'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction', 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.handler', 'CodeSize': len(zip_content), 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), 'Version': '$LATEST', 'VpcConfig': { "SecurityGroupIds": [], "SubnetIds": [], }, 'ResponseMetadata': {'HTTPStatusCode': 201}, }) @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') def test_get_function(): s3_conn = boto3.client('s3', 'us-west-2') s3_conn.create_bucket(Bucket='test-bucket') zip_content = get_test_zip_file1() 
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) conn = boto3.client('lambda', 'us-west-2') conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) result = conn.get_function(FunctionName='testFunction') # this is hard to match against, so remove it result['ResponseMetadata'].pop('HTTPHeaders', None) # Botocore inserts retry attempts not seen in Python27 result['ResponseMetadata'].pop('RetryAttempts', None) result['Configuration'].pop('LastModified') result.should.equal({ "Code": { "Location": "s3://lambda-functions.aws.amazon.com/test.zip", "RepositoryType": "S3" }, "Configuration": { "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, "Role": "test-iam-role", "Runtime": "python2.7", "Timeout": 3, "Version": '$LATEST', "VpcConfig": { "SecurityGroupIds": [], "SubnetIds": [], } }, 'ResponseMetadata': {'HTTPStatusCode': 200}, }) @mock_lambda @mock_s3 def test_delete_function(): s3_conn = boto3.client('s3', 'us-west-2') s3_conn.create_bucket(Bucket='test-bucket') zip_content = get_test_zip_file2() s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) conn = boto3.client('lambda', 'us-west-2') conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) success_result = conn.delete_function(FunctionName='testFunction') # this is hard to match against, so remove it success_result['ResponseMetadata'].pop('HTTPHeaders', None) # Botocore inserts retry attempts not seen in Python27 success_result['ResponseMetadata'].pop('RetryAttempts', None) success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}}) conn.delete_function.when.called_with( FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') def test_list_create_list_get_delete_list(): """ test `list -> create -> list -> get -> delete -> list` integration """ s3_conn = boto3.client('s3', 'us-west-2') s3_conn.create_bucket(Bucket='test-bucket') zip_content = get_test_zip_file2() s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) conn = boto3.client('lambda', 'us-west-2') conn.list_functions()['Functions'].should.have.length_of(0) conn.create_function( FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', }, Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, ) expected_function_result = { "Code": { "Location": "s3://lambda-functions.aws.amazon.com/test.zip", "RepositoryType": "S3" }, "Configuration": { "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, "Role": "test-iam-role", "Runtime": 
"python2.7", "Timeout": 3, "Version": '$LATEST', "VpcConfig": { "SecurityGroupIds": [], "SubnetIds": [], } }, 'ResponseMetadata': {'HTTPStatusCode': 200}, } func = conn.list_functions()['Functions'][0] func.pop('LastModified') func.should.equal(expected_function_result['Configuration']) func = conn.get_function(FunctionName='testFunction') # this is hard to match against, so remove it func['ResponseMetadata'].pop('HTTPHeaders', None) # Botocore inserts retry attempts not seen in Python27 func['ResponseMetadata'].pop('RetryAttempts', None) func['Configuration'].pop('LastModified') func.should.equal(expected_function_result) conn.delete_function(FunctionName='testFunction') conn.list_functions()['Functions'].should.have.length_of(0) @mock_lambda def test_invoke_lambda_error(): lambda_fx = """ def lambda_handler(event, context): raise Exception('failsauce') """ zip_output = io.BytesIO() zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) zip_file.writestr('lambda_function.zip', lambda_fx) zip_file.close() zip_output.seek(0) client = boto3.client('lambda', region_name='us-east-1') client.create_function( FunctionName='test-lambda-fx', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.lambda_handler', Description='test lambda function', Timeout=3, MemorySize=128, Publish=True, Code={ 'ZipFile': zip_output.read() }, ) result = client.invoke( FunctionName='test-lambda-fx', InvocationType='RequestResponse', LogType='Tail' ) assert 'FunctionError' in result assert result['FunctionError'] == 'Handled'
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A git command for managing a local cache of git repositories.""" from __future__ import print_function import errno import logging import optparse import os import tempfile import time import subprocess import sys import urlparse import zipfile from download_from_google_storage import Gsutil import gclient_utils import subcommand try: # pylint: disable=E0602 WinErr = WindowsError except NameError: class WinErr(Exception): pass class LockError(Exception): pass class Lockfile(object): """Class to represent a cross-platform process-specific lockfile.""" def __init__(self, path): self.path = os.path.abspath(path) self.lockfile = self.path + ".lock" self.pid = os.getpid() def _read_pid(self): """Read the pid stored in the lockfile. Note: This method is potentially racy. By the time it returns the lockfile may have been unlocked, removed, or stolen by some other process. """ try: with open(self.lockfile, 'r') as f: pid = int(f.readline().strip()) except (IOError, ValueError): pid = None return pid def _make_lockfile(self): """Safely creates a lockfile containing the current pid.""" open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) fd = os.open(self.lockfile, open_flags, 0o644) f = os.fdopen(fd, 'w') print(self.pid, file=f) f.close() def _remove_lockfile(self): """Delete the lockfile. Complains (implicitly) if it doesn't exist. See gclient_utils.py:rmtree docstring for more explanation on the windows case. """ if sys.platform == 'win32': lockfile = os.path.normcase(self.lockfile) for _ in xrange(3): exitcode = subprocess.call(['cmd.exe', '/c', 'del', '/f', '/q', lockfile]) if exitcode == 0: return time.sleep(3) raise LockError('Failed to remove lock: %s' % lockfile) else: os.remove(self.lockfile) def lock(self): """Acquire the lock. Note: This is a NON-BLOCKING FAIL-FAST operation. Do. Or do not. There is no try. """ try: self._make_lockfile() except OSError as e: if e.errno == errno.EEXIST: raise LockError("%s is already locked" % self.path) else: raise LockError("Failed to create %s (err %s)" % (self.path, e.errno)) def unlock(self): """Release the lock.""" if not self.is_locked(): raise LockError("%s is not locked" % self.path) if not self.i_am_locking(): raise LockError("%s is locked, but not by me" % self.path) self._remove_lockfile() def break_lock(self): """Remove the lock, even if it was created by someone else.""" try: self._remove_lockfile() return True except OSError as exc: if exc.errno == errno.ENOENT: return False else: raise def is_locked(self): """Test if the file is locked by anyone. Note: This method is potentially racy. By the time it returns the lockfile may have been unlocked, removed, or stolen by some other process. """ return os.path.exists(self.lockfile) def i_am_locking(self): """Test if the file is locked by this process.""" return self.is_locked() and self.pid == self._read_pid() def __enter__(self): self.lock() return self def __exit__(self, *_exc): # Windows is unreliable when it comes to file locking. YMMV. 
try: self.unlock() except WinErr: pass class Mirror(object): git_exe = 'git.bat' if sys.platform.startswith('win') else 'git' gsutil_exe = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'third_party', 'gsutil', 'gsutil') bootstrap_bucket = 'chromium-git-cache' def __init__(self, url, refs=None, print_func=None): self.url = url self.refs = refs or [] self.basedir = self.UrlToCacheDir(url) self.mirror_path = os.path.join(self.GetCachePath(), self.basedir) self.print = print_func or print @staticmethod def UrlToCacheDir(url): """Convert a git url to a normalized form for the cache dir path.""" parsed = urlparse.urlparse(url) norm_url = parsed.netloc + parsed.path if norm_url.endswith('.git'): norm_url = norm_url[:-len('.git')] return norm_url.replace('-', '--').replace('/', '-').lower() @staticmethod def FindExecutable(executable): """This mimics the "which" utility.""" path_folders = os.environ.get('PATH').split(os.pathsep) for path_folder in path_folders: target = os.path.join(path_folder, executable) # Just incase we have some ~/blah paths. target = os.path.abspath(os.path.expanduser(target)) if os.path.isfile(target) and os.access(target, os.X_OK): return target if sys.platform.startswith('win'): for suffix in ('.bat', '.cmd', '.exe'): alt_target = target + suffix if os.path.isfile(alt_target) and os.access(alt_target, os.X_OK): return alt_target return None @classmethod def SetCachePath(cls, cachepath): setattr(cls, 'cachepath', cachepath) @classmethod def GetCachePath(cls): if not hasattr(cls, 'cachepath'): try: cachepath = subprocess.check_output( [cls.git_exe, 'config', '--global', 'cache.cachepath']).strip() except subprocess.CalledProcessError: cachepath = None if not cachepath: raise RuntimeError('No global cache.cachepath git configuration found.') setattr(cls, 'cachepath', cachepath) return getattr(cls, 'cachepath') def RunGit(self, cmd, **kwargs): """Run git in a subprocess.""" cwd = kwargs.setdefault('cwd', self.mirror_path) kwargs.setdefault('print_stdout', False) kwargs.setdefault('filter_fn', self.print) env = kwargs.get('env') or kwargs.setdefault('env', os.environ.copy()) env.setdefault('GIT_ASKPASS', 'true') env.setdefault('SSH_ASKPASS', 'true') self.print('running "git %s" in "%s"' % (' '.join(cmd), cwd)) gclient_utils.CheckCallAndFilter([self.git_exe] + cmd, **kwargs) def config(self, cwd=None): if cwd is None: cwd = self.mirror_path self.RunGit(['config', 'core.deltaBaseCacheLimit', gclient_utils.DefaultDeltaBaseCacheLimit()], cwd=cwd) self.RunGit(['config', 'remote.origin.url', self.url], cwd=cwd) self.RunGit(['config', '--replace-all', 'remote.origin.fetch', '+refs/heads/*:refs/heads/*'], cwd=cwd) for ref in self.refs: ref = ref.lstrip('+').rstrip('/') if ref.startswith('refs/'): refspec = '+%s:%s' % (ref, ref) else: refspec = '+refs/%s/*:refs/%s/*' % (ref, ref) self.RunGit(['config', '--add', 'remote.origin.fetch', refspec], cwd=cwd) def bootstrap_repo(self, directory): """Bootstrap the repo from Google Stroage if possible.""" python_fallback = False if sys.platform.startswith('win') and not self.FindExecutable('7z'): python_fallback = True elif sys.platform.startswith('darwin'): # The OSX version of unzip doesn't support zip64. python_fallback = True elif not self.FindExecutable('unzip'): python_fallback = True gs_folder = 'gs://%s/%s' % (self.bootstrap_bucket, self.basedir) gsutil = Gsutil( self.gsutil_exe, boto_path=os.devnull, bypass_prodaccess=True) # Get the most recent version of the zipfile. 
_, ls_out, _ = gsutil.check_call('ls', gs_folder) ls_out_sorted = sorted(ls_out.splitlines()) if not ls_out_sorted: # This repo is not on Google Storage. return False latest_checkout = ls_out_sorted[-1] # Download zip file to a temporary directory. try: tempdir = tempfile.mkdtemp() self.print('Downloading %s' % latest_checkout) code, out, err = gsutil.check_call('cp', latest_checkout, tempdir) if code: self.print('%s\n%s' % (out, err)) return False filename = os.path.join(tempdir, latest_checkout.split('/')[-1]) # Unpack the file with 7z on Windows, unzip on linux, or fallback. if not python_fallback: if sys.platform.startswith('win'): cmd = ['7z', 'x', '-o%s' % directory, '-tzip', filename] else: cmd = ['unzip', filename, '-d', directory] retcode = subprocess.call(cmd) else: try: with zipfile.ZipFile(filename, 'r') as f: f.printdir() f.extractall(directory) except Exception as e: self.print('Encountered error: %s' % str(e), file=sys.stderr) retcode = 1 else: retcode = 0 finally: # Clean up the downloaded zipfile. gclient_utils.rmtree(tempdir) if retcode: self.print( 'Extracting bootstrap zipfile %s failed.\n' 'Resuming normal operations.' % filename) return False return True def exists(self): return os.path.isfile(os.path.join(self.mirror_path, 'config')) def populate(self, depth=None, shallow=False, bootstrap=False, verbose=False): if shallow and not depth: depth = 10000 gclient_utils.safe_makedirs(self.GetCachePath()) v = [] if verbose: v = ['-v', '--progress'] d = [] if depth: d = ['--depth', str(depth)] with Lockfile(self.mirror_path): # Setup from scratch if the repo is new or is in a bad state. tempdir = None if not os.path.exists(os.path.join(self.mirror_path, 'config')): gclient_utils.rmtree(self.mirror_path) tempdir = tempfile.mkdtemp( suffix=self.basedir, dir=self.GetCachePath()) bootstrapped = not depth and bootstrap and self.bootstrap_repo(tempdir) if not bootstrapped: self.RunGit(['init', '--bare'], cwd=tempdir) else: if depth and os.path.exists(os.path.join(self.mirror_path, 'shallow')): logging.warn( 'Shallow fetch requested, but repo cache already exists.') d = [] rundir = tempdir or self.mirror_path self.config(rundir) fetch_cmd = ['fetch'] + v + d + ['origin'] fetch_specs = subprocess.check_output( [self.git_exe, 'config', '--get-all', 'remote.origin.fetch'], cwd=rundir).strip().splitlines() for spec in fetch_specs: try: self.RunGit(fetch_cmd + [spec], cwd=rundir, retry=True) except subprocess.CalledProcessError: logging.warn('Fetch of %s failed' % spec) if tempdir: os.rename(tempdir, self.mirror_path) def update_bootstrap(self): # The files are named <git number>.zip gen_number = subprocess.check_output( [self.git_exe, 'number', 'master'], cwd=self.mirror_path).strip() self.RunGit(['gc']) # Run Garbage Collect to compress packfile. # Creating a temp file and then deleting it ensures we can use this name. 
_, tmp_zipfile = tempfile.mkstemp(suffix='.zip') os.remove(tmp_zipfile) subprocess.call(['zip', '-r', tmp_zipfile, '.'], cwd=self.mirror_path) gsutil = Gsutil(path=self.gsutil_exe, boto_path=None) dest_name = 'gs://%s/%s/%s.zip' % ( self.bootstrap_bucket, self.basedir, gen_number) gsutil.call('cp', tmp_zipfile, dest_name) os.remove(tmp_zipfile) def unlock(self): lf = Lockfile(self.mirror_path) config_lock = os.path.join(self.mirror_path, 'config.lock') if os.path.exists(config_lock): os.remove(config_lock) lf.break_lock() @subcommand.usage('[url of repo to check for caching]') def CMDexists(parser, args): """Check to see if there already is a cache of the given repo.""" _, args = parser.parse_args(args) if not len(args) == 1: parser.error('git cache exists only takes exactly one repo url.') url = args[0] mirror = Mirror(url) if mirror.exists(): print(mirror.mirror_path) return 0 return 1 @subcommand.usage('[url of repo to create a bootstrap zip file]') def CMDupdate_bootstrap(parser, args): """Create and uploads a bootstrap tarball.""" # Lets just assert we can't do this on Windows. if sys.platform.startswith('win'): print('Sorry, update bootstrap will not work on Windows.', file=sys.stderr) return 1 # First, we need to ensure the cache is populated. populate_args = args[:] populate_args.append('--no_bootstrap') CMDpopulate(parser, populate_args) # Get the repo directory. _, args = parser.parse_args(args) url = args[0] mirror = Mirror(url) mirror.update_bootstrap() return 0 @subcommand.usage('[url of repo to add to or update in cache]') def CMDpopulate(parser, args): """Ensure that the cache has all up-to-date objects for the given repo.""" parser.add_option('--depth', type='int', help='Only cache DEPTH commits of history') parser.add_option('--shallow', '-s', action='store_true', help='Only cache 10000 commits of history') parser.add_option('--ref', action='append', help='Specify additional refs to be fetched') parser.add_option('--no_bootstrap', action='store_true', help='Don\'t bootstrap from Google Storage') options, args = parser.parse_args(args) if not len(args) == 1: parser.error('git cache populate only takes exactly one repo url.') url = args[0] mirror = Mirror(url, refs=options.ref) kwargs = { 'verbose': options.verbose, 'shallow': options.shallow, 'bootstrap': not options.no_bootstrap, } if options.depth: kwargs['depth'] = options.depth mirror.populate(**kwargs) @subcommand.usage('[url of repo to unlock, or -a|--all]') def CMDunlock(parser, args): """Unlock one or all repos if their lock files are still around.""" parser.add_option('--force', '-f', action='store_true', help='Actually perform the action') parser.add_option('--all', '-a', action='store_true', help='Unlock all repository caches') options, args = parser.parse_args(args) if len(args) > 1 or (len(args) == 0 and not options.all): parser.error('git cache unlock takes exactly one repo url, or --all') repo_dirs = [] if not options.all: url = args[0] repo_dirs.append(Mirror(url).mirror_path) else: cachepath = Mirror.GetCachePath() repo_dirs = [os.path.join(cachepath, path) for path in os.listdir(cachepath) if os.path.isdir(os.path.join(cachepath, path))] repo_dirs.extend([os.path.join(cachepath, lockfile.replace('.lock', '')) for lockfile in os.listdir(cachepath) if os.path.isfile(os.path.join(cachepath, lockfile)) and lockfile.endswith('.lock') and os.path.join(cachepath, lockfile) not in repo_dirs]) lockfiles = [repo_dir + '.lock' for repo_dir in repo_dirs if os.path.exists(repo_dir + '.lock')] if not options.force: 
    parser.error('git cache unlock requires -f|--force to do anything. '
                 'Refusing to unlock the following repo caches: '
                 + ', '.join(lockfiles))

  unlocked_repos = []
  untouched_repos = []
  for repo_dir in repo_dirs:
    lf = Lockfile(repo_dir)
    config_lock = os.path.join(repo_dir, 'config.lock')
    unlocked = False
    if os.path.exists(config_lock):
      os.remove(config_lock)
      unlocked = True
    if lf.break_lock():
      unlocked = True
    if unlocked:
      unlocked_repos.append(repo_dir)
    else:
      untouched_repos.append(repo_dir)

  if unlocked_repos:
    logging.info('Broke locks on these caches:\n %s' % '\n '.join(
        unlocked_repos))
  if untouched_repos:
    logging.debug('Did not touch these caches:\n %s' % '\n '.join(
        untouched_repos))


class OptionParser(optparse.OptionParser):
  """Wrapper class for OptionParser to handle global options."""

  def __init__(self, *args, **kwargs):
    optparse.OptionParser.__init__(self, *args, prog='git cache', **kwargs)
    self.add_option('-c', '--cache-dir',
                    help='Path to the directory containing the cache')
    self.add_option('-v', '--verbose', action='count', default=0,
                    help='Increase verbosity (can be passed multiple times)')

  def parse_args(self, args=None, values=None):
    options, args = optparse.OptionParser.parse_args(self, args, values)

    try:
      global_cache_dir = Mirror.GetCachePath()
    except RuntimeError:
      global_cache_dir = None
    if options.cache_dir:
      if global_cache_dir and (
          os.path.abspath(options.cache_dir) !=
          os.path.abspath(global_cache_dir)):
        logging.warn('Overriding globally-configured cache directory.')
      Mirror.SetCachePath(options.cache_dir)

    levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)])

    return options, args


def main(argv):
  dispatcher = subcommand.CommandDispatcher(__name__)
  return dispatcher.execute(OptionParser(), argv)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
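

# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original tool): Mirror.UrlToCacheDir()
# flattens a repo URL into a single directory name by dropping the scheme and
# a trailing '.git', doubling existing dashes, and turning '/' into '-'.
# The URL below is just an example.
def _example_url_to_cache_dir():
  url = 'https://chromium.googlesource.com/external/v8-i18n.git'
  cache_dir = Mirror.UrlToCacheDir(url)
  assert cache_dir == 'chromium.googlesource.com-external-v8--i18n'
  return cache_dir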
import sys import unittest from pympler.util.compat import HTMLParser, HTTPConnection, StringIO from pympler.util.compat import Request, urlopen, URLError from socket import error as socket_error from time import sleep from pympler.classtracker import ClassTracker from pympler.garbagegraph import start_debug_garbage, end_debug_garbage from pympler.process import get_current_thread_id from pympler.web import start_profiler, start_in_background # Use separate process for server if available. Otherwise use a thread. #try: # from multiprocessing import Process #except ImportError: from threading import Thread as Process _server = None class Trash(object): pass class Server(Process): def __init__(self): super(Server, self).__init__() self.daemon = True def run(self): """ Redirect bottle logging messages so it doesn't clutter the test output and start the web GUI. """ tracker = ClassTracker() tracker.track_class(Trash) tracked_trash = Trash() tracker.create_snapshot() sys.stdout = StringIO() sys.stderr = StringIO() start_profiler(debug=True, quiet=True, tracker=tracker) class WebGuiTest(unittest.TestCase): defaulthost = 'localhost:8090' defaulturl = 'http://' + defaulthost class LinkChecker(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.errors = 0 def follow(self, link): if link.startswith('http://'): return conn = HTTPConnection(WebGuiTest.defaulthost) conn.request("GET", link) response = conn.getresponse() body = response.read() conn.close() if response.status not in [200, 302, 303, 307]: sys.stderr.write('\nLINK-ERROR: %s, %d, %s' % (link, response.status, response.reason)) if response.status == 500: sys.stderr.write(body) self.errors += 1 def handle_starttag(self, tag, attrs): if tag == 'a': for name, value in attrs: if name == 'href': self.follow(value) def setUp(self): """Use the same server for all tests.""" global _server if not _server: _server = Server() _server.start() wait = 5 running = False while not running and wait > 0: try: urlopen(WebGuiTest.defaulturl).read() running = True except (URLError, socket_error, IOError): wait -= 1 sleep(1) def get(self, url, status=200): conn = HTTPConnection(self.defaulthost) conn.request("GET", url) response = conn.getresponse() body = response.read() conn.close() self.assertEqual(response.status, status) try: body = body.decode() except UnicodeDecodeError: pass return body def test_overview(self): """Test availability of web gui.""" req = Request(self.defaulturl) page = str(urlopen(req).read()) self.assert_("Process overview" in page) def test_links(self): """Test all linked pages are available.""" req = Request(self.defaulturl) page = str(urlopen(req).read()) parser = self.LinkChecker() parser.feed(page) parser.close() self.assertEqual(parser.errors, 0) def test_static_files(self): """Test if static files are served.""" for filename in ['style.css', 'jquery.flot.min.js']: self.get('/static/%s' % filename, status=200) def test_traceback(self): """Test if stack traces can be viewed. First test valid tracebacks, then the invalid ones. Also check if we can expand the locals of the current stackframe and access size information of local data (dummy). 
""" class Dummy(object): pass dummy = Dummy() for threadid in sys._current_frames(): resp = self.get('/traceback/%d' % threadid, status=200) if threadid == get_current_thread_id(): locals_id = id(locals()) self.assertTrue('id="%d' % locals_id in resp, resp) resp = self.get('/objects/%d' % locals_id, status=200) self.assertTrue('dummy' in resp, resp) self.assertTrue('id="%d' % id(dummy) in resp, resp) self.get('/objects/%d' % id(dummy), status=200) self.get('/traceback/gabelstapler', status=500) body = self.get('/traceback/12345', status=200) self.assertTrue("Cannot retrieve stacktrace for thread 12345" in body, body) def test_garbage(self): """Test if reference cycles can be viewed.""" start_debug_garbage() try: body = self.get('/garbage', status=200) #self.assertTrue('0 reference cycles' in body, body) cycle = ['spam', 'eggs'] cycle.append(cycle) del cycle body = self.get('/garbage', status=200) #self.assertTrue('0 reference cycles' in body, body) body = self.get('/refresh', status=303) body = self.get('/garbage', status=200) #self.assertTrue('1 reference cycle' in body, body) self.assertTrue('/garbage/0' in body) body = self.get('/garbage/0', status=200) self.assertTrue('/garbage/graph/0' in body, body) self.assertTrue('/garbage/graph/0?reduce=' in body, body) body = self.get('/garbage/graph/0', status=200) body = self.get('/garbage/graph/0?reduce=on', status=200) finally: end_debug_garbage() def test_tracker(self): resp = self.get('/tracker', status=200) clsname = '%s.Trash' % (Trash.__module__) self.assertTrue(clsname in resp, resp) resp = self.get('/tracker/class/%s' % clsname, status=200) self.assertTrue('1 instance' in resp, resp) def test_start_in_background(self): """Test server can be started in background mode.""" tracker = ClassTracker() thread = start_in_background(port=64546, stats=tracker.stats) self.assertEqual(thread.daemon, True) if __name__ == "__main__": suite = unittest.TestSuite() tclasses = [WebGuiTest,] for tclass in tclasses: names = unittest.getTestCaseNames(tclass, 'test_') suite.addTests(map(tclass, names)) if not unittest.TextTestRunner().run(suite).wasSuccessful(): sys.exit(1)
#!/usr/bin/python #---------------------------------------------------------------------- # Copyright (c) 2013-2016 Raytheon BBN Technologies # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and/or hardware specification (the "Work") to # deal in the Work without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Work, and to permit persons to whom the Work # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Work. # # THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS # IN THE WORK. #---------------------------------------------------------------------- """ Framework to run a GENI Aggregate Manager. See geni/am for the Reference Aggregate Manager that this runs. Run with "-h" flag to see usage and command line options. """ import importlib import pdb import sys import subprocess import time # Check python version. Requires 2.6 or greater, but less than 3. if sys.version_info < (2, 6): raise Exception('Must use python 2.6 or greater.') elif sys.version_info >= (3,): raise Exception('Not python 3 ready') import threading import logging import optparse import os import gcf.geni import gram import gram.am import gram.am.am3 import gram.am.gram_am2 import gram.am.gram.config from gcf.geni.config import read_config # Return an instance of a class given by fully qualified name # (module_path.classname) # Return an instance of a class given by fully qualified name # (module_path.classname) with variable constructor args def getInstanceFromClassname(class_name, *argv): class_module_name = ".".join(class_name.split('.')[:-1]) class_base_name = class_name.split('.')[-1] class_module = importlib.import_module(class_module_name) class_instance = eval("class_module.%s" % class_base_name) object_instance = class_instance(*argv) return object_instance # Set up parser and return parsed argumetns def parse_args(argv): parser = optparse.OptionParser() parser.add_option("-k", "--keyfile", help="AM key file name", metavar="FILE") parser.add_option("-g", "--certfile", help="AM certificate file name (PEM format)", metavar="FILE") parser.add_option("-c", "--configfile", help="config file path", metavar="FILE") # Note: The trusted CH certificates are _not_ enough here. # It needs self signed certificates. EG CA certificates. 
parser.add_option("-r", "--rootcadir", help="Trusted Root certificates directory (files in PEM format)", metavar="FILE") # Could try to determine the real IP Address instead of the loopback # using socket.gethostbyname(socket.gethostname()) parser.add_option("-H", "--host", help="server ip", metavar="HOST") parser.add_option("-p", "--v3_port", type=int, help="V3 server port", metavar="PORT") parser.add_option("-q", "--v2_port", type=int, help="V2 server port", metavar="PORT") parser.add_option("--debug", action="store_true", default=False, help="enable debugging output") parser.add_option("-V", "--api-version", type=int, help="AM API Version", default=3) parser.add_option("--snapshot_dir", \ help="name of directory to save snapshots", \ default=None) parser.add_option("--recover_from_snapshot", \ help="name of snapshot to initialize gram state", \ default=None) parser.add_option("--recover_from_most_recent_snapshot", \ help="whether to recover from most recent " + \ "snapshot in 'gram_snapshot_directory'", \ default=True) parser.add_option("--snapshot_maintain_limit", type=int, help="Retain only this limit of recent snapshots", default=10) parser.add_option("--config_file", help="Location of GRAM installation-specific " + "configuration", default="/etc/gram/config.json") return parser.parse_args() def getAbsPath(path): """Return None or a normalized absolute path version of the argument string. Does not check that the path exists.""" if path is None: return None if path.strip() == "": return None path = os.path.normcase(os.path.expanduser(path)) if os.path.isabs(path): return path else: return os.path.abspath(path) def main(argv=None): if argv is None: argv = sys.argv opts = parse_args(argv)[0] gram.am.gram.config.initialize(opts.config_file) # If the port isn't set explicitly, use defaults from config if not opts.v3_port: opts.v3_port = gram.am.gram.config.gram_am_port if not opts.v2_port: opts.v2_port = gram.am.gram.config.gram_am_v2_port level = logging.INFO if opts.debug: level = logging.DEBUG logging.basicConfig(level=level, format = '%(asctime)s %(message)s') # Read in config file options, command line gets priority optspath = None if not opts.configfile is None: optspath = os.path.expanduser(opts.configfile) config = read_config(optspath) for (key,val) in config['aggregate_manager'].items(): if hasattr(opts,key) and getattr(opts,key) is None: setattr(opts,key,val) if not hasattr(opts,key): setattr(opts,key,val) if getattr(opts,'rootcadir') is None: setattr(opts,'rootcadir',config['global']['rootcadir']) if opts.rootcadir is None: sys.exit('Missing path to trusted root certificate directory (-r argument)') certfile = getAbsPath(opts.certfile) keyfile = getAbsPath(opts.keyfile) if not os.path.exists(certfile): sys.exit("Aggregate certfile %s doesn't exist" % certfile) if not os.path.exists(keyfile): sys.exit("Aggregate keyfile %s doesn't exist" % keyfile) # Check if quantum is running, if not, then take a nap command_str = '%s net-list' % gram.am.gram.config.network_type command = command_str.split() ready = 0 while(not ready): try : subprocess.check_output(command) ready = 1 logging.getLogger('gram-am').info(' Ready to start GRAM') except : logging.getLogger('gram-am').error('Error executing command %s' % command) time.sleep(15) gram.am.gram.config.snapshot_dir = opts.snapshot_dir gram.am.gram.config.recover_from_snapshot = opts.recover_from_snapshot gram.am.gram.config.recover_from_most_recent_snapshot = \ opts.recover_from_most_recent_snapshot 
    gram.am.gram.config.snapshot_maintain_limit = opts.snapshot_maintain_limit

    # Instantiate an argument guard that will reject or modify
    # arguments and options provided to calls
    argument_guard = None
    if hasattr(opts, 'argument_guard'):
        argument_guard = getInstanceFromClassname(opts.argument_guard)

    # Instantiate authorizer from 'authorizer' config argument
    # By default, use the SFA authorizer
    if hasattr(opts, 'authorizer'):
        authorizer_classname = opts.authorizer
    else:
        authorizer_classname = "gcf.geni.auth.sfa_authorizer.SFA_Authorizer"
    authorizer = getInstanceFromClassname(authorizer_classname,
                                          opts.rootcadir, opts,
                                          argument_guard)

    # Use XMLRPC authorizer if opts.remote_authorizer is set
    if hasattr(opts, 'remote_authorizer'):
        import xmlrpclib
        authorizer = xmlrpclib.Server(opts.remote_authorizer)

    # Instantiate resource manager from 'authorizer_resource_manager'
    # config argument. Default = None
    resource_manager = None
    if hasattr(opts, 'authorizer_resource_manager'):
        resource_manager = \
            getInstanceFromClassname(opts.authorizer_resource_manager)

    # rootcadir is dir of multiple certificates
    delegate = gcf.geni.ReferenceAggregateManager(getAbsPath(opts.rootcadir))

    # here rootcadir is supposed to be a single file with multiple
    # certs possibly concatenated together
    comboCertsFile = gcf.geni.CredentialVerifier.getCAsFileFromDir(getAbsPath(opts.rootcadir))

    server_url = "https://%s:%d/" % (opts.host, int(opts.v3_port))
    GRAM = gram.am.am3.GramReferenceAggregateManager(getAbsPath(opts.rootcadir),
                                                     config['global']['base_name'],
                                                     certfile, server_url)

    if opts.api_version == 1:
        msg = "Version 1 of AM API unsupported in GRAM"
        sys.exit(msg)
    #elif opts.api_version == 2:
    ams_v2 = gram.am.gram_am2.GramAggregateManagerServer((opts.host, int(opts.v2_port)),
                                                         keyfile=keyfile,
                                                         certfile=certfile,
                                                         trust_roots_dir=getAbsPath(opts.rootcadir),
                                                         ca_certs=comboCertsFile,
                                                         base_name=config['global']['base_name'],
                                                         authorizer=authorizer,
                                                         resource_manager=resource_manager,
                                                         GRAM=GRAM)
    #elif opts.api_version == 3:
    ams_v3 = gram.am.am3.GramAggregateManagerServer((opts.host, int(opts.v3_port)),
                                                    keyfile=keyfile,
                                                    certfile=certfile,
                                                    trust_roots_dir=getAbsPath(opts.rootcadir),
                                                    ca_certs=comboCertsFile,
                                                    base_name=config['global']['base_name'],
                                                    authorizer=authorizer,
                                                    resource_manager=resource_manager,
                                                    GRAM=GRAM)
    #else:
    #    msg = "Unknown API version: %d. Valid choices are \"1\", \"2\", or \"3\""
    #    sys.exit(msg % (opts.api_version))

    logging.getLogger('gcf-am').info('GENI AM 3 Listening on port %s...' % (opts.v3_port))
    logging.getLogger('gcf-am').info('GENI AM 2 Listening on port %s...' % (opts.v2_port))

    thread = threading.Thread(target=ams_v2.serve_forever, args=())
    thread.start()
    ams_v3.serve_forever()


if __name__ == "__main__":
    sys.exit(main())
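

# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original server): how
# getInstanceFromClassname() resolves a dotted class name and forwards
# constructor arguments.  logging.Logger is just an arbitrary stdlib class
# used as an example here; the authorizer and resource-manager classes are
# loaded the same way from configuration.
def _example_get_instance_from_classname():
    logger = getInstanceFromClassname("logging.Logger", "gram-example")
    assert isinstance(logger, logging.Logger)
    assert logger.name == "gram-example"
    return logger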
from numpy import asarray import numpy as np import copy ListType = list TupleType = tuple StringType = str def abut(source, *args): # comment: except for the repetition, this is equivalent to hstack. """\nLike the |Stat abut command. It concatenates two arrays column-wise and returns the result. CAUTION: If one array is shorter, it will be repeated until it is as long as the other. Format: abut (source, args) where args=any # of arrays Returns: an array as long as the LONGEST array past, source appearing on the 'left', arrays in <args> attached on the 'right'.\n""" source = asarray(source) if len(source.shape)==1: width = 1 source = np.resize(source,[source.shape[0],width]) else: width = source.shape[1] for addon in args: if len(addon.shape)==1: width = 1 addon = np.resize(addon,[source.shape[0],width]) else: width = source.shape[1] if len(addon) < len(source): addon = np.resize(addon,[source.shape[0],addon.shape[1]]) elif len(source) < len(addon): source = np.resize(source,[addon.shape[0],source.shape[1]]) source = np.concatenate((source,addon),1) return source def unique(inarray): """Returns unique items in the FIRST dimension of the passed array. Only works on arrays NOT including string items (e.g., type 'O' or 'c'). """ inarray = asarray(inarray) uniques = np.array([inarray[0]]) if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY for item in inarray[1:]: if np.add.reduce(np.equal(uniques,item).flat) == 0: try: uniques = np.concatenate([uniques,np.array[np.newaxis,:]]) except TypeError: uniques = np.concatenate([uniques,np.array([item])]) else: # IT MUST BE A 2+D ARRAY if inarray.dtype.char != 'O': # not an Object array for item in inarray[1:]: if not np.sum(np.alltrue(np.equal(uniques,item),1),axis=0): try: uniques = np.concatenate( [uniques,item[np.newaxis,:]] ) except TypeError: # the item to add isn't a list uniques = np.concatenate([uniques,np.array([item])]) else: pass # this item is already in the uniques array else: # must be an Object array, alltrue/equal functions don't work for item in inarray[1:]: newflag = 1 for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=> test = np.sum(abs(np.array(map(cmp,item,unq))),axis=0) if test == 0: # if item identical to any 1 row in uniques newflag = 0 # then not a novel item to add break if newflag == 1: try: uniques = np.concatenate( [uniques,item[np.newaxis,:]] ) except TypeError: # the item to add isn't a list uniques = np.concatenate([uniques,np.array([item])]) return uniques def colex(a, indices, axis=1): """\nExtracts specified indices (a list) from passed array, along passed axis (column extraction is default). BEWARE: A 1D array is presumed to be a column-array (and that the whole array will be returned as a column). Returns: the columns of a specified by indices\n""" if type(indices) not in [ListType,TupleType,np.ndarray]: indices = [indices] if len(np.shape(a)) == 1: cols = np.resize(a,[a.shape[0],1]) else: cols = np.take(a,indices,axis) return cols def adm(a, criterion): """\nReturns rows from the passed list of lists that meet the criteria in the passed criterion expression (a string). Format: adm (a,criterion) where criterion is like 'x[2]==37'\n""" lines = eval('filter(lambda x: '+criterion+',a)') try: lines = np.array(lines) except: lines = np.array(lines,'O') return lines def linexand(a, columnlist, valuelist): """Returns the rows of an array where col (from columnlist) = val (from valuelist). One value is required for each column in columnlist. 
Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i\n""" a = asarray(a) if type(columnlist) not in [ListType,TupleType,np.ndarray]: columnlist = [columnlist] if type(valuelist) not in [ListType,TupleType,np.ndarray]: valuelist = [valuelist] criterion = '' for i in range(len(columnlist)): if type(valuelist[i])==StringType: critval = '\'' + valuelist[i] + '\'' else: critval = str(valuelist[i]) criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and' criterion = criterion[0:-3] # remove the "and" after the last crit return adm(a,criterion) def collapse(a, keepcols, collapsecols, stderr=0, ns=0, cfcn=None): """Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. If the sterr or N of the mean are desired, set either or both parameters to 1. Returns: unique 'conditions' specified by the contents of columns specified by keepcols, abutted with the mean(s,axis=0) of column(s) specified by collapsecols Examples -------- import numpy as np from scipy import stats xx = np.array([[ 0., 0., 1.], [ 1., 1., 1.], [ 2., 2., 1.], [ 0., 3., 1.], [ 1., 4., 1.], [ 2., 5., 1.], [ 0., 6., 1.], [ 1., 7., 1.], [ 2., 8., 1.], [ 0., 9., 1.]]) >>> stats._support.collapse(xx, (0), (1,2), stderr=0, ns=0, cfcn=None) array([[ 0. , 4.5, 1. ], [ 0. , 4.5, 1. ], [ 1. , 4. , 1. ], [ 1. , 4. , 1. ], [ 2. , 5. , 1. ], [ 2. , 5. , 1. ]]) >>> stats._support.collapse(xx, (0), (1,2), stderr=1, ns=1, cfcn=None) array([[ 0. , 4.5 , 1.93649167, 4. , 1. , 0. , 4. ], [ 0. , 4.5 , 1.93649167, 4. , 1. , 0. , 4. ], [ 1. , 4. , 1.73205081, 3. , 1. , 0. , 3. ], [ 1. , 4. , 1.73205081, 3. , 1. , 0. , 3. ], [ 2. , 5. , 1.73205081, 3. , 1. , 0. , 3. ], [ 2. , 5. , 1.73205081, 3. , 1. , 0. , 3. ]]) """ if cfcn is None: cfcn = lambda(x): np.mean(x, axis=0) a = asarray(a) if keepcols == []: avgcol = colex(a,collapsecols) means = cfcn(avgcol) return means else: if type(keepcols) not in [ListType,TupleType,np.ndarray]: keepcols = [keepcols] values = colex(a,keepcols) # so that "item" can be appended (below) uniques = unique(values).tolist() # get a LIST, so .sort keeps rows intact uniques.sort() newlist = [] for item in uniques: if type(item) not in [ListType,TupleType,np.ndarray]: item =[item] tmprows = linexand(a,keepcols,item) for col in collapsecols: avgcol = colex(tmprows,col) item.append(cfcn(avgcol)) if stderr: if len(avgcol)>1: item.append(compute_stderr(avgcol)) else: item.append('N/A') if ns: item.append(len(avgcol)) newlist.append(item) try: new_a = np.array(newlist) except TypeError: new_a = np.array(newlist,'O') return new_a def _chk_asarray(a, axis): if axis is None: a = np.ravel(a) outaxis = 0 else: a = np.asarray(a) outaxis = axis return a, outaxis def _chk2_asarray(a, b, axis): if axis is None: a = np.ravel(a) b = np.ravel(b) outaxis = 0 else: a = np.asarray(a) b = np.asarray(b) outaxis = axis return a, b, outaxis def compute_stderr(a, axis=0, ddof=1): a, axis = _chk_asarray(a, axis) return np.std(a,axis,ddof=1) / float(np.sqrt(a.shape[axis]))
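# --- Added illustration (not part of this module) -----------------------------
# Hedged sketch: the row selection that linexand() assembles as an eval'd string
# criterion (and hands to adm()) can be written as a plain numpy boolean mask.
# The small array below is invented purely for the example.
import numpy as np

_a = np.array([[0., 10., 1.],
               [1., 11., 1.],
               [0., 12., 2.]])

# rows where column 0 == 0 and column 2 == 1, i.e. what linexand(_a, [0, 2], [0, 1]) returns
_mask = (_a[:, 0] == 0) & (_a[:, 2] == 1)
_selected = _a[_mask]
assert _selected.shape == (1, 3) and list(_selected[0]) == [0., 10., 1.]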
''' receives BGP messages and assign them to the set of SMPC workers ''' import argparse from multiprocessing.connection import Listener import os import Queue from threading import Thread import sys np = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if np not in sys.path: sys.path.append(np) import json import util.log from server_pprs import server as Server import random import os import pickle import subprocess from time import sleep import time import multiprocessing as mp from multiprocessing import Process, Manager import prio_worker_rs2 from load_ribs import load_ribs from Queue import Empty import threading import port_config logger = util.log.getLogger('prio-handler-rs2') RS2_MODE = 2 class PrioHandlerRs2(object): def __init__(self, asn_2_id_file, rib_file, number_of_processes): logger.info("Initializing the All Handler for RS2.") self.number_of_processes = number_of_processes with open(asn_2_id_file, 'r') as f: self.asn_2_id = json.load(f) self.prefix_2_nh_id_2_route_id = load_ribs(rib_file, self.asn_2_id, RS2_MODE) if rib_file else {} # Initialize a XRS Server self.server_receive_bgp_messages = Server(logger, endpoint=(port_config.process_assignement["rs2"], port_config.ports_assignment["rs2_receive_bgp_messages"])) # NOTE: fake sending, only for performance test #self.server_send_mpc_output = Server(logger, endpoint=(port_config.process_assignement["rs2"], port_config.ports_assignment["rs2_send_mpc_output"])) self.server_rs1 = Server(logger, endpoint=(port_config.process_assignement["rs2"], port_config.ports_assignment["rs1_rs2"])) self.run = True # start the MPC process in background self.receive_mappings_from_rs1_th = Thread(target=self.receive_mappings_from_rs1) self.receive_mappings_from_rs1_th.setName("receive_mappings_from_rs1_th") self.receive_mappings_from_rs1_th.daemon = True self.receive_mappings_from_rs1_th.start() self.id_2_msg = mp.Manager().dict() self.id_2_port = mp.Manager().dict() self.port2stop = mp.Manager().dict() self.lock = mp.Manager().Lock() self.port2stop_lock = mp.Manager().Lock() self.stop_port = False self.handler_2_worker_queues={} self.worker_2_handler_queue = mp.Queue() for i in range(port_config.ports_assignment["worker_port"],port_config.ports_assignment["worker_port"]+self.number_of_processes): self.handler_2_worker_queues[i]=mp.Queue() # create workers self.receive_from_workers_th = Thread(target=self.receive_from_workers) self.receive_from_workers_th.setName( "receive_from_workers_th") #self.receive_from_workers_th.daemon = True self.receive_from_workers_th.start() #self.workers_pool = mp.Pool(self.number_of_processes, all_worker_rs2.all_worker_main,(self.handler_to_worker_queue,self.worker_ids_queue,)) # Setup a list of processes that we want to run self.processes = [mp.Process(target=prio_worker_rs2.prio_worker_main, args=(x, self.handler_2_worker_queues[x], self.worker_2_handler_queue )) for x in range(port_config.ports_assignment["worker_port"],port_config.ports_assignment["worker_port"]+self.number_of_processes)] # Run processes for p in self.processes: p.start() def receive_mappings_from_rs1(self): waiting = 0 logger.info("connecting to RS1") self.server_rs1.start() logger.info("connected to RS1 for receiving mapping messages") while self.run: # get BGP messages from ExaBGP try: msg = self.server_rs1.receiver_queue.get(True, 1) msg = pickle.loads(msg) waiting = 0 # Received BGP bgp_update advertisement from ExaBGP if "stop" in msg: self.port2stop_lock.acquire() logger.info("received stop message") port = msg["port"] 
while not self.handler_2_worker_queues[port].empty(): sleep(1) if self.stop_port: self.handler_2_worker_queues[port].put(msg) self.port2stop[port] = None logger.info("port2stop: " + str(self.port2stop)) if len(self.port2stop) == self.number_of_processes: self.port2stop_lock.release() break self.port2stop_lock.release() continue if msg["type"] == "to-rs2-init": logger.info("received initialization message from rs1") pass if "announcement_id" in msg: announcement_id = msg["announcement_id"] self.lock.acquire() self.id_2_port[announcement_id] = msg["worker_port"] if announcement_id in self.id_2_msg: #send message to the correct worker if self.id_2_msg[announcement_id]["prefix"] not in self.prefix_2_nh_id_2_route_id.keys(): self.prefix_2_nh_id_2_route_id[self.id_2_msg[announcement_id]["prefix"]]={} as_id = self.asn_2_id[self.id_2_msg[announcement_id]["asn"]] self.prefix_2_nh_id_2_route_id[self.id_2_msg[announcement_id]["prefix"]][as_id] = announcement_id self.handler_2_worker_queues[self.id_2_port[announcement_id]].put({"announcement_id" : msg["announcement_id"], "as_id" : as_id, "messages" : self.prefix_2_nh_id_2_route_id[self.id_2_msg[announcement_id]["prefix"]]}) del self.id_2_port[announcement_id] del self.id_2_msg[announcement_id] self.lock.release() except Empty: if waiting == 0: waiting = 1 else: waiting = (waiting % 30) + 1 if waiting == 30: pass logger.debug("closing reception from RS1") def start(self): logger.info("Starting the Server to handle incoming BGP Updates from ExaBGP. Listening on port 6002") self.server_receive_bgp_messages.start() logger.info("Connected to ExaBGP via port 6002") # NOTE: fake sending, only for performance test #self.server_send_mpc_output.start() #logger.info("RS2 connected to Host Receiver Mock ") while self.run: # get BGP messages from ExaBGP waiting = 0 try: msg = self.server_receive_bgp_messages.receiver_queue.get(True, 1) msg = pickle.loads(msg) waiting = 0 # Received BGP bgp_update advertisement from ExaBGP if "stop" in msg: close_msg = {"stop" : 1} logger.info("Shutting down exa receiver.") self.port2stop_lock.acquire() logger.info("received stop message") exit_flag = 0 while not exit_flag: exit_flag = 1 for port in self.port2stop.keys(): if not self.handler_2_worker_queues[port].empty(): exit_flag = 0 sleep(1) for port in self.port2stop.keys(): self.handler_2_worker_queues[port].put(msg) else: self.stop_port = True self.port2stop_lock.release() #self.send_update(close_msg) self.server_receive_bgp_messages.conn.close() time.sleep(5) break else: announcement_id = msg["announcement_id"] self.lock.acquire() self.id_2_msg[announcement_id] = msg if announcement_id in self.id_2_port: #send message to the correct worker if msg["prefix"] not in self.prefix_2_nh_id_2_route_id.keys(): self.prefix_2_nh_id_2_route_id[msg["prefix"]]={} as_id = self.asn_2_id[msg["asn"]] self.prefix_2_nh_id_2_route_id[msg["prefix"]][as_id] = announcement_id self.handler_2_worker_queues[self.id_2_port[announcement_id]].put({"announcement_id" : announcement_id, "as_id": as_id, "messages" : self.prefix_2_nh_id_2_route_id[msg["prefix"]]}) del self.id_2_port[announcement_id] del self.id_2_msg[announcement_id] self.lock.release() except Queue.Empty: if waiting == 0: waiting = 1 else: waiting = (waiting % 30) + 1 if waiting == 30: pass logger.debug("Closing reception from hosts") logger.debug("joining RS1 and worker receiver threads ") self.receive_mappings_from_rs1_th.join() logger.debug("joined RS1 ") self.receive_from_workers_th.join() logger.debug("joined workers ") for p in 
self.processes: p.join() def receive_from_workers(self): waiting = 0 stop_counter = 0 while True: try: msg = self.worker_2_handler_queue.get(True, 1) if "stop" in msg: logger.debug("received STOP message from worker") stop_counter += 1 if stop_counter == self.number_of_processes: logger.debug("sending STOP message to member") # NOTE: fake sending, only for performance test #self.server_send_mpc_output.sender_queue.put(pickle.dumps(msg)) break continue if msg["type"] == "to-hosts": pass # NOTE: fake sending, only for performance test #self.server_send_mpc_output.sender_queue.put(pickle.dumps(msg)) except Empty: if waiting == 0: waiting = 1 else: waiting = (waiting % 30) + 1 if waiting == 30: pass def stop(self): logger.info("Stopping.") self.run = False def main(): parser = argparse.ArgumentParser() parser.add_argument("asn_2_id_file", type=str, help="specify asn_2_id json file") parser.add_argument('-r', '--rib_file', type=str, help='specify the rib file, eg.g. ../examples/test-rs/ribs/bview') parser.add_argument("-p","--processes", help="number of parallel SMPC processes", type=int, default=1) args = parser.parse_args() pprs = PrioHandlerRs2(args.asn_2_id_file, args.rib_file, args.processes) rs_thread = Thread(target=pprs.start) rs_thread.setName("PrioHandler2") rs_thread.daemon = True rs_thread.start() while rs_thread.is_alive(): try: rs_thread.join(1) #logger.debug("join cycle") except KeyboardInterrupt: pprs.stop() logger.info("waiting before dying") logger.info("thread count: " + str(threading.active_count())) for thread in threading.enumerate(): print thread.name + " " + str(thread.is_alive()) for p in pprs.processes: print p.is_alive() sleep(5) if __name__ == '__main__': main()
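# --- Added illustration (not part of prio_handler_rs2) ------------------------
# Hedged sketch of the fan-out pattern the handler uses above: one
# multiprocessing.Queue per worker "port" for handler->worker jobs and a single
# shared queue for worker->handler results, with a "stop" message to shut each
# worker down.  The worker body is a trivial stand-in for
# prio_worker_rs2.prio_worker_main, and the port numbers are made up.
import multiprocessing as mp


def toy_worker(port, in_q, out_q):
    while True:
        job = in_q.get()
        if "stop" in job:
            out_q.put({"stop": 1, "port": port})
            break
        out_q.put({"port": port, "result": job["announcement_id"]})


if __name__ == '__main__':
    ports = [6100, 6101]
    in_queues = {p: mp.Queue() for p in ports}
    out_queue = mp.Queue()
    procs = [mp.Process(target=toy_worker, args=(p, in_queues[p], out_queue))
             for p in ports]
    for proc in procs:
        proc.start()
    in_queues[6100].put({"announcement_id": 42})
    for p in ports:
        in_queues[p].put({"stop": 1})
    # drain: one result plus one stop acknowledgement per worker that got a stop
    for _ in range(3):
        print(out_queue.get())
    for proc in procs:
        proc.join()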
import multiprocessing import warnings import numpy import six import chainer from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_device(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): self.device.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': self.device.device.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('GPU devices must be specified.') self._master = optimizer.target self._devices = [chainer.get_device(device) for device in devices] self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with chainer.using_device(self._devices[0]): self._master.to_device(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator( len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with chainer.using_device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: 
reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() super(MultiprocessParallelUpdater, self).finalize() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) def size_num_grads(link): """Count total size of all gradient arrays of a given link Args: link (chainer.link.Link): Target link object. """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) def gather_grads(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('gather_grads works only on GPU.') return _gather(link, 'grad') def gather_params(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('Link.gather_params works only on GPU.') return _gather(link, 'data') def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
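# --- Added illustration (not part of this updater module) ---------------------
# Hedged, CPU-only sketch of what gather_grads()/scatter_grads() above do
# conceptually: pack every parameter gradient into one flat buffer (so a single
# NCCL reduce can cover the whole model), then unpack it again.  The real
# versions run a fused cupy elementwise kernel on GPU memory; this numpy version
# only shows the packing scheme and uses invented toy arrays.
import numpy as np


def gather_flat(arrays):
    sizes = [a.size for a in arrays]
    flat = np.concatenate([a.ravel().astype(np.float32) for a in arrays])
    return flat, sizes


def scatter_flat(flat, sizes, arrays):
    offset = 0
    for a, size in zip(arrays, sizes):
        a[...] = flat[offset:offset + size].reshape(a.shape).astype(a.dtype)
        offset += size


_grads = [np.ones((2, 3), dtype=np.float32), np.full((4,), 2.0, dtype=np.float16)]
_flat, _sizes = gather_flat(_grads)
_flat *= 0.5                        # stand-in for the cross-device reduction
scatter_flat(_flat, _sizes, _grads)  # gradients now hold the "reduced" values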
# Copyright (C) 2013 eNovance SAS <[email protected]> # # Author: Sylvain Afchain <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from mock import call from oslo.config import cfg from neutron.services.metering.drivers.iptables import iptables_driver from neutron.tests import base from neutron.tests.unit import test_api_v2 _uuid = test_api_v2._uuid class IptablesDriverTestCase(base.BaseTestCase): def setUp(self): super(IptablesDriverTestCase, self).setUp() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.addCleanup(self.utils_exec_p.stop) self.iptables_cls_p = mock.patch( 'neutron.agent.linux.iptables_manager.IptablesManager') self.iptables_cls = self.iptables_cls_p.start() self.addCleanup(self.iptables_cls_p.stop) self.iptables_inst = mock.Mock() self.v4filter_inst = mock.Mock() self.v6filter_inst = mock.Mock() self.v4filter_inst.chains = [] self.v6filter_inst.chains = [] self.iptables_inst.ipv4 = {'filter': self.v4filter_inst} self.iptables_inst.ipv6 = {'filter': self.v6filter_inst} self.iptables_cls.return_value = self.iptables_inst cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') cfg.CONF.set_override('root_helper', 'fake_sudo', 'AGENT') self.metering = iptables_driver.IptablesMeteringDriver('metering', cfg.CONF) def test_root_helper(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': []}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) self.iptables_cls.assert_called_with(root_helper='fake_sudo', namespace=mock.ANY, binary_name=mock.ANY, use_ipv6=mock.ANY) def test_add_metering_label(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': []}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) calls = [call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_process_metering_label_rules(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': 
'473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'egress', 'excluded': False, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-o qg-7d411f48-ec -s 20.0.0.0/24' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_with_rules(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': True, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) calls = [call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), call.add_rule('neutron-meter-r-eeef45da-c60', '-i qg-7d411f48-ec -d 20.0.0.0/24 -j RETURN', wrap=False, top=True)] self.v4filter_inst.assert_has_calls(calls) def 
test_update_metering_label_rules(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) updates = copy.deepcopy(routers) updates[0]['_metering_labels'][0]['rules'] = [{ 'direction': 'egress', 'excluded': True, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}, {'direction': 'ingress', 'excluded': False, 'id': '6f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}] self.metering.update_metering_label_rules(None, updates) calls = [call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), call.empty_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-o qg-6d411f48-ec -s 10.0.0.0/24 -j RETURN', wrap=False, top=True), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -d 20.0.0.0/24 -j ' 'neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label_rule(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}, {'direction': 'ingress', 'excluded': False, 'id': 'aaaa261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}] }], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}] }], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.update_metering_label_rules(None, routers) calls = [call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-7d411f48-ec -d 
10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-7d411f48-ec -d 20.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), call.empty_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-7d411f48-ec -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}] }], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) self.metering.remove_metering_label(None, routers) calls = [call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-7d411f48-ec -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), call.remove_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.remove_chain('neutron-meter-r-c5df2fe5-c60', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_update_routers(self): routers = [{'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': True, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] self.metering.add_metering_label(None, routers) updates = copy.deepcopy(routers) updates[0]['gw_port_id'] = '587b63c1-22a3-40b3-9834-486d1fb215a5' self.metering.update_routers(None, updates) calls = [call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), 
call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), call.add_rule('neutron-meter-r-eeef45da-c60', '-i qg-7d411f48-ec -d 20.0.0.0/24 -j RETURN', wrap=False, top=True), call.remove_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.remove_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-587b63c1-22 -d 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls)
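# --- Added illustration (not part of the driver or its tests) -----------------
# Hedged helper, inferred purely from the expected strings asserted above (not
# the driver's real functions): label chains are 'neutron-meter-l-'/'-r-' plus a
# truncated label id, the router's gateway device is 'qg-' plus a truncated port
# id, and ingress vs. egress rules match on -i/-d vs. -o/-s.  The truncation
# lengths below are read off the fixtures, not taken from Neutron's code.
def expected_chain(label_id, kind='r', max_chain_len=28):
    return ('neutron-meter-%s-%s' % (kind, label_id))[:max_chain_len]


def expected_rule(gw_port_id, direction, remote_ip_prefix, label_chain):
    dev = ('qg-' + gw_port_id)[:14]
    if direction == 'ingress':
        return '-i %s -d %s -j %s' % (dev, remote_ip_prefix, label_chain)
    return '-o %s -s %s -j %s' % (dev, remote_ip_prefix, label_chain)


# matches the strings asserted in test_add_metering_label_with_rules above
assert expected_chain('c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83') == 'neutron-meter-r-c5df2fe5-c60'
assert expected_rule('6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'ingress', '10.0.0.0/24',
                     'neutron-meter-l-c5df2fe5-c60') == \
    '-i qg-6d411f48-ec -d 10.0.0.0/24 -j neutron-meter-l-c5df2fe5-c60'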
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests.""" import mock import pytest from google.cloud import dlp_v2 from google.cloud.dlp_v2.proto import dlp_pb2 from google.protobuf import empty_pb2 class MultiCallableStub(object): """Stub for the grpc.UnaryUnaryMultiCallable interface.""" def __init__(self, method, channel_stub): self.method = method self.channel_stub = channel_stub def __call__(self, request, timeout=None, metadata=None, credentials=None): self.channel_stub.requests.append((self.method, request)) response = None if self.channel_stub.responses: response = self.channel_stub.responses.pop() if isinstance(response, Exception): raise response if response: return response class ChannelStub(object): """Stub for the grpc.Channel interface.""" def __init__(self, responses=[]): self.responses = responses self.requests = [] def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) class CustomException(Exception): pass class TestDlpServiceClient(object): def test_inspect_content(self): # Setup Expected Response expected_response = {} expected_response = dlp_pb2.InspectContentResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") response = client.inspect_content(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.InspectContentRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_inspect_content_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.inspect_content(parent) def test_redact_image(self): # Setup Expected Response redacted_image = b"28" extracted_text = "extractedText998260012" expected_response = { "redacted_image": redacted_image, "extracted_text": extracted_text, } expected_response = dlp_pb2.RedactImageResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") response = client.redact_image(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.RedactImageRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == 
actual_request def test_redact_image_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.redact_image(parent) def test_deidentify_content(self): # Setup Expected Response expected_response = {} expected_response = dlp_pb2.DeidentifyContentResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") response = client.deidentify_content(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.DeidentifyContentRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_deidentify_content_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.deidentify_content(parent) def test_reidentify_content(self): # Setup Expected Response expected_response = {} expected_response = dlp_pb2.ReidentifyContentResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") response = client.reidentify_content(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.ReidentifyContentRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_reidentify_content_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.reidentify_content(parent) def test_list_info_types(self): # Setup Expected Response expected_response = {} expected_response = dlp_pb2.ListInfoTypesResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() response = client.list_info_types() assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.ListInfoTypesRequest() actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_info_types_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as 
create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() with pytest.raises(CustomException): client.list_info_types() def test_create_inspect_template(self): # Setup Expected Response name = "name3373707" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name, "display_name": display_name, "description": description, } expected_response = dlp_pb2.InspectTemplate(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.organization_path("[ORGANIZATION]") response = client.create_inspect_template(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.CreateInspectTemplateRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_inspect_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.organization_path("[ORGANIZATION]") with pytest.raises(CustomException): client.create_inspect_template(parent) def test_update_inspect_template(self): # Setup Expected Response name_2 = "name2-1052831874" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name_2, "display_name": display_name, "description": description, } expected_response = dlp_pb2.InspectTemplate(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_inspect_template_path( "[ORGANIZATION]", "[INSPECT_TEMPLATE]" ) response = client.update_inspect_template(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.UpdateInspectTemplateRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_update_inspect_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_inspect_template_path( "[ORGANIZATION]", "[INSPECT_TEMPLATE]" ) with pytest.raises(CustomException): client.update_inspect_template(name) def test_get_inspect_template(self): # Setup Expected Response name = "name3373707" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name, "display_name": display_name, "description": description, } expected_response = dlp_pb2.InspectTemplate(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() response = client.get_inspect_template() assert 
expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.GetInspectTemplateRequest() actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_inspect_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() with pytest.raises(CustomException): client.get_inspect_template() def test_list_inspect_templates(self): # Setup Expected Response next_page_token = "" inspect_templates_element = {} inspect_templates = [inspect_templates_element] expected_response = { "next_page_token": next_page_token, "inspect_templates": inspect_templates, } expected_response = dlp_pb2.ListInspectTemplatesResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.organization_path("[ORGANIZATION]") paged_list_response = client.list_inspect_templates(parent) resources = list(paged_list_response) assert len(resources) == 1 assert expected_response.inspect_templates[0] == resources[0] assert len(channel.requests) == 1 expected_request = dlp_pb2.ListInspectTemplatesRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_inspect_templates_exception(self): channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.organization_path("[ORGANIZATION]") paged_list_response = client.list_inspect_templates(parent) with pytest.raises(CustomException): list(paged_list_response) def test_delete_inspect_template(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_inspect_template_path( "[ORGANIZATION]", "[INSPECT_TEMPLATE]" ) client.delete_inspect_template(name) assert len(channel.requests) == 1 expected_request = dlp_pb2.DeleteInspectTemplateRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_inspect_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_inspect_template_path( "[ORGANIZATION]", "[INSPECT_TEMPLATE]" ) with pytest.raises(CustomException): client.delete_inspect_template(name) def test_create_deidentify_template(self): # Setup Expected Response name = "name3373707" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name, "display_name": display_name, "description": description, } expected_response = dlp_pb2.DeidentifyTemplate(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.organization_path("[ORGANIZATION]") response = client.create_deidentify_template(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.CreateDeidentifyTemplateRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_deidentify_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.organization_path("[ORGANIZATION]") with pytest.raises(CustomException): client.create_deidentify_template(parent) def test_update_deidentify_template(self): # Setup Expected Response name_2 = "name2-1052831874" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name_2, "display_name": display_name, "description": description, } expected_response = dlp_pb2.DeidentifyTemplate(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_deidentify_template_path( "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]" ) response = client.update_deidentify_template(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.UpdateDeidentifyTemplateRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_update_deidentify_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_deidentify_template_path( "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]" ) with pytest.raises(CustomException): client.update_deidentify_template(name) def test_get_deidentify_template(self): # Setup Expected Response name_2 = "name2-1052831874" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name_2, "display_name": display_name, "description": description, } expected_response = dlp_pb2.DeidentifyTemplate(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_deidentify_template_path( "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]" ) response = client.get_deidentify_template(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.GetDeidentifyTemplateRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_deidentify_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_deidentify_template_path( "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]" ) with pytest.raises(CustomException): client.get_deidentify_template(name) def test_list_deidentify_templates(self): # Setup Expected Response next_page_token = "" deidentify_templates_element = {} deidentify_templates = [deidentify_templates_element] expected_response = { "next_page_token": next_page_token, "deidentify_templates": deidentify_templates, } expected_response = dlp_pb2.ListDeidentifyTemplatesResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.organization_path("[ORGANIZATION]") paged_list_response = client.list_deidentify_templates(parent) resources = list(paged_list_response) assert len(resources) == 1 assert expected_response.deidentify_templates[0] == resources[0] assert len(channel.requests) == 1 expected_request = dlp_pb2.ListDeidentifyTemplatesRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_deidentify_templates_exception(self): channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.organization_path("[ORGANIZATION]") paged_list_response = client.list_deidentify_templates(parent) with pytest.raises(CustomException): list(paged_list_response) def test_delete_deidentify_template(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_deidentify_template_path( "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]" ) client.delete_deidentify_template(name) assert len(channel.requests) == 1 expected_request = dlp_pb2.DeleteDeidentifyTemplateRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_deidentify_template_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_deidentify_template_path( "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]" ) with pytest.raises(CustomException): client.delete_deidentify_template(name) def test_create_dlp_job(self): # Setup Expected Response name = "name3373707" job_trigger_name = "jobTriggerName1819490804" expected_response = {"name": name, "job_trigger_name": job_trigger_name} expected_response = dlp_pb2.DlpJob(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") response = client.create_dlp_job(parent) assert 
expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.CreateDlpJobRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_dlp_job_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.create_dlp_job(parent) def test_list_dlp_jobs(self): # Setup Expected Response next_page_token = "" jobs_element = {} jobs = [jobs_element] expected_response = {"next_page_token": next_page_token, "jobs": jobs} expected_response = dlp_pb2.ListDlpJobsResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") paged_list_response = client.list_dlp_jobs(parent) resources = list(paged_list_response) assert len(resources) == 1 assert expected_response.jobs[0] == resources[0] assert len(channel.requests) == 1 expected_request = dlp_pb2.ListDlpJobsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_dlp_jobs_exception(self): channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") paged_list_response = client.list_dlp_jobs(parent) with pytest.raises(CustomException): list(paged_list_response) def test_get_dlp_job(self): # Setup Expected Response name_2 = "name2-1052831874" job_trigger_name = "jobTriggerName1819490804" expected_response = {"name": name_2, "job_trigger_name": job_trigger_name} expected_response = dlp_pb2.DlpJob(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]") response = client.get_dlp_job(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.GetDlpJobRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_dlp_job_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]") with pytest.raises(CustomException): client.get_dlp_job(name) def test_delete_dlp_job(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]") client.delete_dlp_job(name) assert len(channel.requests) == 1 
expected_request = dlp_pb2.DeleteDlpJobRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_dlp_job_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]") with pytest.raises(CustomException): client.delete_dlp_job(name) def test_cancel_dlp_job(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]") client.cancel_dlp_job(name) assert len(channel.requests) == 1 expected_request = dlp_pb2.CancelDlpJobRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_cancel_dlp_job_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]") with pytest.raises(CustomException): client.cancel_dlp_job(name) def test_list_job_triggers(self): # Setup Expected Response next_page_token = "" job_triggers_element = {} job_triggers = [job_triggers_element] expected_response = { "next_page_token": next_page_token, "job_triggers": job_triggers, } expected_response = dlp_pb2.ListJobTriggersResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") paged_list_response = client.list_job_triggers(parent) resources = list(paged_list_response) assert len(resources) == 1 assert expected_response.job_triggers[0] == resources[0] assert len(channel.requests) == 1 expected_request = dlp_pb2.ListJobTriggersRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_job_triggers_exception(self): channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") paged_list_response = client.list_job_triggers(parent) with pytest.raises(CustomException): list(paged_list_response) def test_get_job_trigger(self): # Setup Expected Response name_2 = "name2-1052831874" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name_2, "display_name": display_name, "description": description, } expected_response = dlp_pb2.JobTrigger(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.project_job_trigger_path("[PROJECT]", "[JOB_TRIGGER]") response = 
client.get_job_trigger(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.GetJobTriggerRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_job_trigger_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.project_job_trigger_path("[PROJECT]", "[JOB_TRIGGER]") with pytest.raises(CustomException): client.get_job_trigger(name) def test_delete_job_trigger(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = "name3373707" client.delete_job_trigger(name) assert len(channel.requests) == 1 expected_request = dlp_pb2.DeleteJobTriggerRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_job_trigger_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = "name3373707" with pytest.raises(CustomException): client.delete_job_trigger(name) def test_update_job_trigger(self): # Setup Expected Response name_2 = "name2-1052831874" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name_2, "display_name": display_name, "description": description, } expected_response = dlp_pb2.JobTrigger(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.project_job_trigger_path("[PROJECT]", "[JOB_TRIGGER]") response = client.update_job_trigger(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.UpdateJobTriggerRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_update_job_trigger_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.project_job_trigger_path("[PROJECT]", "[JOB_TRIGGER]") with pytest.raises(CustomException): client.update_job_trigger(name) def test_create_job_trigger(self): # Setup Expected Response name = "name3373707" display_name = "displayName1615086568" description = "description-1724546052" expected_response = { "name": name, "display_name": display_name, "description": description, } expected_response = dlp_pb2.JobTrigger(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.project_path("[PROJECT]") response = 
client.create_job_trigger(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.CreateJobTriggerRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_job_trigger_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.create_job_trigger(parent) def test_create_stored_info_type(self): # Setup Expected Response name = "name3373707" expected_response = {"name": name} expected_response = dlp_pb2.StoredInfoType(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.organization_path("[ORGANIZATION]") response = client.create_stored_info_type(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.CreateStoredInfoTypeRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_stored_info_type_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.organization_path("[ORGANIZATION]") with pytest.raises(CustomException): client.create_stored_info_type(parent) def test_update_stored_info_type(self): # Setup Expected Response name_2 = "name2-1052831874" expected_response = {"name": name_2} expected_response = dlp_pb2.StoredInfoType(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_stored_info_type_path( "[ORGANIZATION]", "[STORED_INFO_TYPE]" ) response = client.update_stored_info_type(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.UpdateStoredInfoTypeRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_update_stored_info_type_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_stored_info_type_path( "[ORGANIZATION]", "[STORED_INFO_TYPE]" ) with pytest.raises(CustomException): client.update_stored_info_type(name) def test_get_stored_info_type(self): # Setup Expected Response name_2 = "name2-1052831874" expected_response = {"name": name_2} expected_response = dlp_pb2.StoredInfoType(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: 
create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_stored_info_type_path( "[ORGANIZATION]", "[STORED_INFO_TYPE]" ) response = client.get_stored_info_type(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = dlp_pb2.GetStoredInfoTypeRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_stored_info_type_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_stored_info_type_path( "[ORGANIZATION]", "[STORED_INFO_TYPE]" ) with pytest.raises(CustomException): client.get_stored_info_type(name) def test_list_stored_info_types(self): # Setup Expected Response next_page_token = "" stored_info_types_element = {} stored_info_types = [stored_info_types_element] expected_response = { "next_page_token": next_page_token, "stored_info_types": stored_info_types, } expected_response = dlp_pb2.ListStoredInfoTypesResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request parent = client.organization_path("[ORGANIZATION]") paged_list_response = client.list_stored_info_types(parent) resources = list(paged_list_response) assert len(resources) == 1 assert expected_response.stored_info_types[0] == resources[0] assert len(channel.requests) == 1 expected_request = dlp_pb2.ListStoredInfoTypesRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_stored_info_types_exception(self): channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request parent = client.organization_path("[ORGANIZATION]") paged_list_response = client.list_stored_info_types(parent) with pytest.raises(CustomException): list(paged_list_response) def test_delete_stored_info_type(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup Request name = client.organization_stored_info_type_path( "[ORGANIZATION]", "[STORED_INFO_TYPE]" ) client.delete_stored_info_type(name) assert len(channel.requests) == 1 expected_request = dlp_pb2.DeleteStoredInfoTypeRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_stored_info_type_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = dlp_v2.DlpServiceClient() # Setup request name = client.organization_stored_info_type_path( "[ORGANIZATION]", "[STORED_INFO_TYPE]" ) with pytest.raises(CustomException): client.delete_stored_info_type(name)
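

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the generated DLP test suite above: every
# test repeats the same setup -- build a ChannelStub, patch
# google.api_core.grpc_helpers.create_channel, construct the client.  A
# hypothetical helper like the one below could factor that pattern out; it
# relies on the mock, ChannelStub, dlp_v2 and dlp_pb2 names already imported
# or defined by this test module.
def _stubbed_dlp_client(responses=()):
    """Return (client, channel) with the gRPC channel replaced by a stub."""
    channel = ChannelStub(responses=list(responses))
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = dlp_v2.DlpServiceClient()
    return client, channel


# Example usage, mirroring test_get_dlp_job above (the bracketed values are
# the same placeholders the generated tests use):
#
#     expected = dlp_pb2.DlpJob(name="name2-1052831874")
#     client, channel = _stubbed_dlp_client([expected])
#     name = client.dlp_job_path("[PROJECT]", "[DLP_JOB]")
#     assert client.get_dlp_job(name) == expected
#     assert channel.requests[0][1] == dlp_pb2.GetDlpJobRequest(name=name)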
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import workflows

from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
    as network_workflows


LOG = logging.getLogger(__name__)


class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
    with_subnet = forms.BooleanField(initial=True,
                                     required=False,
                                     widget=forms.HiddenInput())
    msg = _('Specify "Network Address"')

    def __init__(self, request, *args, **kwargs):
        super(CreateSubnetInfoAction, self).__init__(request,
                                                     *args, **kwargs)
        self.fields['cidr'].required = True

    class Meta(object):
        name = _("Subnet")
        help_text = _('Create a subnet associated with the network. '
                      'Advanced configuration is available by clicking on '
                      'the "Subnet Details" tab.')

    def clean(self):
        cleaned_data = workflows.Action.clean(self)
        self._check_subnet_data(cleaned_data)
        return cleaned_data


class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
    action_class = CreateSubnetInfoAction
    depends_on = ("network_id",)


class CreateSubnet(network_workflows.CreateNetwork):
    slug = "create_subnet"
    name = _("Create Subnet")
    finalize_button_name = _("Create")
    success_message = _('Created subnet "%s".')
    failure_message = _('Unable to create subnet "%s".')
    default_steps = (CreateSubnetInfo,
                     network_workflows.CreateSubnetDetail)

    def format_status_message(self, message):
        name = self.context.get('subnet_name') or self.context.get('subnet_id')
        return message % name

    def get_success_url(self):
        return reverse("horizon:project:networks:detail",
                       args=(self.context.get('network_id'),))

    def get_failure_url(self):
        return reverse("horizon:project:networks:detail",
                       args=(self.context.get('network_id'),))

    def handle(self, request, data):
        subnet = self._create_subnet(request, data)
        return True if subnet else False


class UpdateSubnetInfoAction(CreateSubnetInfoAction):
    cidr = forms.IPField(label=_("Network Address"),
                         required=False,
                         initial="",
                         widget=forms.TextInput(
                             attrs={'readonly': 'readonly'}),
                         help_text=_("Network address in CIDR format "
                                     "(e.g. 192.168.0.0/24)"),
                         version=forms.IPv4 | forms.IPv6,
                         mask=True)
    # NOTE(amotoki): When the 'disabled' attribute is set for the ChoiceField
    # and ValidationError is raised for a POST request, the initial value of
    # the ip_version ChoiceField is not set in the re-displayed form.
    # As a result, 'IPv4' is displayed even when IPv6 is used if a
    # ValidationError is detected. In addition the 'required=True' check
    # complains on re-POST since the value of the ChoiceField is not set.
    # Thus a HiddenInput is used for the ip_version ChoiceField as a
    # workaround.
    ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
                                   widget=forms.HiddenInput(),
                                   label=_("IP Version"))

    gateway_ip = forms.IPField(
        label=_("Gateway IP (optional)"),
        required=False,
        initial="",
        help_text=_("IP address of Gateway (e.g. 192.168.0.254). "
                    "Specify an explicit address to set the gateway. "
                    "If you do not want to use a gateway, "
                    "check 'Disable Gateway' below."),
        version=forms.IPv4 | forms.IPv6,
        mask=False)
    no_gateway = forms.BooleanField(label=_("Disable Gateway"),
                                    initial=False, required=False)

    class Meta(object):
        name = _("Subnet")
        help_text = _('Update a subnet associated with the network. '
                      'Advanced configuration is available by clicking on '
                      'the "Subnet Details" tab.')

    def clean(self):
        cleaned_data = workflows.Action.clean(self)
        self._check_subnet_data(cleaned_data, is_create=False)
        return cleaned_data


class UpdateSubnetInfo(CreateSubnetInfo):
    action_class = UpdateSubnetInfoAction
    depends_on = ("network_id", "subnet_id")


class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
    def __init__(self, request, context, *args, **kwargs):
        super(UpdateSubnetDetailAction, self).__init__(request, context,
                                                       *args, **kwargs)
        # TODO(amotoki): Due to Neutron bug 1362966, we cannot pass "None"
        # to Neutron. It means we cannot set the IPv6 two modes to
        # "No option selected".
        # Until bug 1362966 is fixed, we disable this field.
        # if context['ip_version'] != 6:
        #     self.fields['ipv6_modes'].widget = forms.HiddenInput()
        #     self.fields['ipv6_modes'].required = False
        self.fields['ipv6_modes'].widget = forms.HiddenInput()
        self.fields['ipv6_modes'].required = False

    class Meta(object):
        name = _("Subnet Details")
        help_text = _('Specify additional attributes for the subnet.')


class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
    action_class = UpdateSubnetDetailAction


class UpdateSubnet(network_workflows.CreateNetwork):
    slug = "update_subnet"
    name = _("Edit Subnet")
    finalize_button_name = _("Save")
    success_message = _('Updated subnet "%s".')
    failure_message = _('Unable to update subnet "%s".')
    success_url = "horizon:project:networks:detail"
    failure_url = "horizon:project:networks:detail"
    default_steps = (UpdateSubnetInfo,
                     UpdateSubnetDetail)

    def format_status_message(self, message):
        name = self.context.get('subnet_name') or self.context.get('subnet_id')
        return message % name

    def get_success_url(self):
        return reverse(self.success_url,
                       args=(self.context.get('network_id'),))

    def _update_subnet(self, request, data):
        network_id = self.context.get('network_id')
        try:
            subnet_id = self.context.get('subnet_id')
            params = {}
            params['name'] = data['subnet_name']
            if data['no_gateway']:
                params['gateway_ip'] = None
            elif data['gateway_ip']:
                params['gateway_ip'] = data['gateway_ip']

            # We should send gateway_ip only when it is changed, because
            # updating gateway_ip is prohibited when the ip is used.
            # See bug 1227268.
            subnet = api.neutron.subnet_get(request, subnet_id)
            if params['gateway_ip'] == subnet.gateway_ip:
                del params['gateway_ip']

            self._setup_subnet_parameters(params, data, is_create=False)

            subnet = api.neutron.subnet_update(request, subnet_id, **params)
            msg = _('Subnet "%s" was successfully updated.') % data['cidr']
            LOG.debug(msg)
            return subnet
        except Exception as e:
            msg = (_('Failed to update subnet "%(sub)s": '
                     ' %(reason)s') %
                   {"sub": data['cidr'], "reason": e})
            redirect = reverse(self.failure_url, args=(network_id,))
            exceptions.handle(request, msg, redirect=redirect)
            return False

    def handle(self, request, data):
        subnet = self._update_subnet(request, data)
        return True if subnet else False
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.

from .error import TweepError
from .utils import parse_datetime, parse_html_value, parse_a_href


class ResultSet(list):
    """A list like object that holds results from a Twitter API query."""

    def __init__(self, max_id=None, since_id=None):
        super(ResultSet, self).__init__()
        self._max_id = max_id
        self._since_id = since_id

    @property
    def max_id(self):
        if self._max_id:
            return self._max_id
        ids = self.ids()
        return max(ids) if ids else None

    @property
    def since_id(self):
        if self._since_id:
            return self._since_id
        ids = self.ids()
        return min(ids) if ids else None

    def ids(self):
        return [item.id for item in self if hasattr(item, 'id')]


class Model(object):

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        # pickle
        pickle = dict(self.__dict__)
        try:
            del pickle['_api']  # do not pickle the API reference
        except KeyError:
            pass
        return pickle

    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a list of JSON objects into a result set of model instances."""
        results = ResultSet()
        for obj in json_list:
            if obj:
                results.append(cls.parse(api, obj))
        return results


class Status(Model):

    @classmethod
    def parse(cls, api, json):
        status = cls(api)
        for k, v in json.items():
            if k == 'user':
                user_model = getattr(api.parser.model_factory, 'user') if api else User
                user = user_model.parse(api, v)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED
            elif k == 'created_at':
                setattr(status, k, parse_datetime(v))
            elif k == 'source':
                if '<' in v:
                    setattr(status, k, parse_html_value(v))
                    setattr(status, 'source_url', parse_a_href(v))
                else:
                    setattr(status, k, v)
                    setattr(status, 'source_url', None)
            elif k == 'retweeted_status':
                setattr(status, k, Status.parse(api, v))
            elif k == 'place':
                if v is not None:
                    setattr(status, k, Place.parse(api, v))
                else:
                    setattr(status, k, None)
            else:
                setattr(status, k, v)
        return status

    def destroy(self):
        return self._api.destroy_status(self.id)

    def retweet(self):
        return self._api.retweet(self.id)

    def retweets(self):
        return self._api.retweets(self.id)

    def favorite(self):
        return self._api.create_favorite(self.id)


class User(Model):

    @classmethod
    def parse(cls, api, json):
        user = cls(api)
        for k, v in json.items():
            if k == 'created_at':
                setattr(user, k, parse_datetime(v))
            elif k == 'status':
                setattr(user, k, Status.parse(api, v))
            elif k == 'following':
                # twitter sets this to null if it is false
                if v is True:
                    setattr(user, k, True)
                else:
                    setattr(user, k, False)
            else:
                setattr(user, k, v)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['users']

        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results

    def timeline(self, **kargs):
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        return self._api.lists_memberships(user=self.screen_name,
                                           *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        return self._api.lists_subscriptions(user=self.screen_name,
                                             *args, **kargs)

    def lists(self, *args, **kargs):
        return self._api.lists(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        return self._api.followers_ids(user_id=self.id, *args, **kargs)


class DirectMessage(Model):

    @classmethod
    def parse(cls, api, json):
        dm = cls(api)
        for k, v in json.items():
            if k == 'sender' or k == 'recipient':
                setattr(dm, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(dm, k, parse_datetime(v))
            else:
                setattr(dm, k, v)
        return dm

    def destroy(self):
        return self._api.destroy_direct_message(self.id)


class Friendship(Model):

    @classmethod
    def parse(cls, api, json):
        relationship = json['relationship']

        # parse source
        source = cls(api)
        for k, v in relationship['source'].items():
            setattr(source, k, v)

        # parse target
        target = cls(api)
        for k, v in relationship['target'].items():
            setattr(target, k, v)

        return source, target


class Category(Model):

    @classmethod
    def parse(cls, api, json):
        category = cls(api)
        for k, v in json.items():
            setattr(category, k, v)
        return category


class SavedSearch(Model):

    @classmethod
    def parse(cls, api, json):
        ss = cls(api)
        for k, v in json.items():
            if k == 'created_at':
                setattr(ss, k, parse_datetime(v))
            else:
                setattr(ss, k, v)
        return ss

    def destroy(self):
        return self._api.destroy_saved_search(self.id)


class SearchResults(ResultSet):

    @classmethod
    def parse(cls, api, json):
        metadata = json['search_metadata']
        results = SearchResults(metadata.get('max_id'),
                                metadata.get('since_id'))
        results.refresh_url = metadata.get('refresh_url')
        results.completed_in = metadata.get('completed_in')
        results.query = metadata.get('query')

        for status in json['statuses']:
            results.append(Status.parse(api, status))
        return results


class List(Model):

    @classmethod
    def parse(cls, api, json):
        lst = List(api)
        for k, v in json.items():
            if k == 'user':
                setattr(lst, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(lst, k, parse_datetime(v))
            else:
                setattr(lst, k, v)
        return lst

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        results = ResultSet()
        if isinstance(json_list, dict):
            json_list = json_list['lists']
        for obj in json_list:
            results.append(cls.parse(api, obj))
        return results

    def update(self, **kargs):
        return self._api.update_list(self.slug, **kargs)

    def destroy(self):
        return self._api.destroy_list(self.slug)

    def timeline(self, **kargs):
        return self._api.list_timeline(self.user.screen_name,
                                       self.slug, **kargs)

    def add_member(self, id):
        return self._api.add_list_member(self.slug, id)

    def remove_member(self, id):
        return self._api.remove_list_member(self.slug, id)

    def members(self, **kargs):
        return self._api.list_members(self.user.screen_name,
                                      self.slug, **kargs)

    def is_member(self, id):
        return self._api.is_list_member(self.user.screen_name,
                                        self.slug, id)

    def subscribe(self):
        return self._api.subscribe_list(self.user.screen_name, self.slug)

    def unsubscribe(self):
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)

    def subscribers(self, **kargs):
        return self._api.list_subscribers(self.user.screen_name,
                                          self.slug, **kargs)

    def is_subscribed(self, id):
        return self._api.is_subscribed_list(self.user.screen_name,
                                            self.slug, id)


class Relation(Model):

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for k, v in json.items():
            if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
                setattr(result, k, Status.parse(api, v))
            elif k == 'results':
                setattr(result, k, Relation.parse_list(api, v))
            else:
                setattr(result, k, v)
        return result


class Relationship(Model):

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for k, v in json.items():
            if k == 'connections':
                setattr(result, 'is_following', 'following' in v)
                setattr(result, 'is_followed_by', 'followed_by' in v)
            else:
                setattr(result, k, v)
        return result


class JSONModel(Model):

    @classmethod
    def parse(cls, api, json):
        return json


class IDModel(Model):

    @classmethod
    def parse(cls, api, json):
        if isinstance(json, list):
            return json
        else:
            return json['ids']


class BoundingBox(Model):

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        if json is not None:
            for k, v in json.items():
                setattr(result, k, v)
        return result

    def origin(self):
        """
        Return longitude, latitude of southwest (bottom, left) corner of
        bounding box, as a tuple.

        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][0])

    def corner(self):
        """
        Return longitude, latitude of northeast (top, right) corner of
        bounding box, as a tuple.

        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][2])


class Place(Model):

    @classmethod
    def parse(cls, api, json):
        place = cls(api)
        for k, v in json.items():
            if k == 'bounding_box':
                # bounding_box value may be null (None).
                # Example: "United States" (id=96683cc9126741d1)
                if v is not None:
                    t = BoundingBox.parse(api, v)
                else:
                    t = v
                setattr(place, k, t)
            elif k == 'contained_within':
                # contained_within is a list of Places.
                setattr(place, k, Place.parse_list(api, v))
            else:
                setattr(place, k, v)
        return place

    @classmethod
    def parse_list(cls, api, json_list):
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['result']['places']

        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results


class ModelFactory(object):
    """
    Used by parsers for creating instances of models.
    You may subclass this factory to add your own extended models.
    """

    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_results = SearchResults
    category = Category
    list = List
    relation = Relation
    relationship = Relationship

    json = JSONModel
    ids = IDModel
    place = Place
    bounding_box = BoundingBox
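

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of Tweepy itself: ModelFactory's docstring
# says it may be subclassed to plug in extended models.  The classes below
# are hypothetical; Status.parse above looks the 'user' model up through
# api.parser.model_factory, and a parser configured with this factory would
# build AnnotatedStatus instances instead of plain Status ones.
class AnnotatedStatus(Status):
    """Status with one convenience helper layered on the parsed fields."""

    def is_reply(self):
        # in_reply_to_status_id lands on the object via the generic
        # setattr() branch of Status.parse().
        return getattr(self, 'in_reply_to_status_id', None) is not None


class AnnotatedModelFactory(ModelFactory):
    status = AnnotatedStatus


# Wiring the factory into the API happens outside this module; the library's
# ModelParser is expected to accept a model_factory argument, e.g.
#
#     api = tweepy.API(auth,
#                      parser=ModelParser(model_factory=AnnotatedModelFactory))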
import fnmatch import functools import io import ntpath import os import posixpath import re import sys from collections import Sequence from contextlib import contextmanager from errno import EINVAL, ENOENT, ENOTDIR from operator import attrgetter from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO from urllib.parse import quote_from_bytes as urlquote_from_bytes supports_symlinks = True if os.name == 'nt': import nt if sys.getwindowsversion()[:2] >= (6, 0): from nt import _getfinalpathname else: supports_symlinks = False _getfinalpathname = None else: nt = None __all__ = [ "PurePath", "PurePosixPath", "PureWindowsPath", "Path", "PosixPath", "WindowsPath", ] # # Internals # def _is_wildcard_pattern(pat): # Whether this pattern needs actual matching using fnmatch, or can # be looked up directly as a file. return "*" in pat or "?" in pat or "[" in pat class _Flavour(object): """A flavour implements a particular (platform-specific) set of path semantics.""" def __init__(self): self.join = self.sep.join def parse_parts(self, parts): parsed = [] sep = self.sep altsep = self.altsep drv = root = '' it = reversed(parts) for part in it: if not part: continue if altsep: part = part.replace(altsep, sep) drv, root, rel = self.splitroot(part) if sep in rel: for x in reversed(rel.split(sep)): if x and x != '.': parsed.append(sys.intern(x)) else: if rel and rel != '.': parsed.append(sys.intern(rel)) if drv or root: if not drv: # If no drive is present, try to find one in the previous # parts. This makes the result of parsing e.g. # ("C:", "/", "a") reasonably intuitive. for part in it: if not part: continue if altsep: part = part.replace(altsep, sep) drv = self.splitroot(part)[0] if drv: break break if drv or root: parsed.append(drv + root) parsed.reverse() return drv, root, parsed def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2): """ Join the two paths represented by the respective (drive, root, parts) tuples. Return a new (drive, root, parts) tuple. """ if root2: if not drv2 and drv: return drv, root2, [drv + root2] + parts2[1:] elif drv2: if drv2 == drv or self.casefold(drv2) == self.casefold(drv): # Same drive => second path is relative to the first return drv, root, parts + parts2[1:] else: # Second path is non-anchored (common case) return drv, root, parts + parts2 return drv2, root2, parts2 class _WindowsFlavour(_Flavour): # Reference for Windows paths can be found at # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx sep = '\\' altsep = '/' has_drv = True pathmod = ntpath is_supported = (os.name == 'nt') drive_letters = ( set(chr(x) for x in range(ord('a'), ord('z') + 1)) | set(chr(x) for x in range(ord('A'), ord('Z') + 1)) ) ext_namespace_prefix = '\\\\?\\' reserved_names = ( {'CON', 'PRN', 'AUX', 'NUL'} | {'COM%d' % i for i in range(1, 10)} | {'LPT%d' % i for i in range(1, 10)} ) # Interesting findings about extended paths: # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported # but '\\?\c:/a' is not # - extended paths are always absolute; "relative" extended paths will # fail. def splitroot(self, part, sep=sep): first = part[0:1] second = part[1:2] if (second == sep and first == sep): # XXX extended paths should also disable the collapsing of "." # components (according to MSDN docs). 
prefix, part = self._split_extended_path(part) first = part[0:1] second = part[1:2] else: prefix = '' third = part[2:3] if (second == sep and first == sep and third != sep): # is a UNC path: # vvvvvvvvvvvvvvvvvvvvv root # \\machine\mountpoint\directory\etc\... # directory ^^^^^^^^^^^^^^ index = part.find(sep, 2) if index != -1: index2 = part.find(sep, index + 1) # a UNC path can't have two slashes in a row # (after the initial two) if index2 != index + 1: if index2 == -1: index2 = len(part) if prefix: return prefix + part[1:index2], sep, part[index2+1:] else: return part[:index2], sep, part[index2+1:] drv = root = '' if second == ':' and first in self.drive_letters: drv = part[:2] part = part[2:] first = third if first == sep: root = first part = part.lstrip(sep) return prefix + drv, root, part def casefold(self, s): return s.lower() def casefold_parts(self, parts): return [p.lower() for p in parts] def resolve(self, path, strict=False): s = str(path) if not s: return os.getcwd() previous_s = None if _getfinalpathname is not None: if strict: return self._ext_to_normal(_getfinalpathname(s)) else: while True: try: s = self._ext_to_normal(_getfinalpathname(s)) except FileNotFoundError: previous_s = s s = os.path.abspath(os.path.join(s, os.pardir)) else: if previous_s is None: return s else: return s + os.path.sep + os.path.basename(previous_s) # Means fallback on absolute return None def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix): prefix = '' if s.startswith(ext_prefix): prefix = s[:4] s = s[4:] if s.startswith('UNC\\'): prefix += s[:3] s = '\\' + s[3:] return prefix, s def _ext_to_normal(self, s): # Turn back an extended path into a normal DOS-like path return self._split_extended_path(s)[1] def is_reserved(self, parts): # NOTE: the rules for reserved names seem somewhat complicated # (e.g. r"..\NUL" is reserved but not r"foo\NUL"). # We err on the side of caution and return True for paths which are # not considered reserved by Windows. if not parts: return False if parts[0].startswith('\\\\'): # UNC paths are never reserved return False return parts[-1].partition('.')[0].upper() in self.reserved_names def make_uri(self, path): # Under Windows, file URIs use the UTF-8 encoding. drive = path.drive if len(drive) == 2 and drive[1] == ':': # It's a path on a local drive => 'file:///c:/a/b' rest = path.as_posix()[2:].lstrip('/') return 'file:///%s/%s' % ( drive, urlquote_from_bytes(rest.encode('utf-8'))) else: # It's a path on a network drive => 'file://host/share/a/b' return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8')) def gethomedir(self, username): if 'HOME' in os.environ: userhome = os.environ['HOME'] elif 'USERPROFILE' in os.environ: userhome = os.environ['USERPROFILE'] elif 'HOMEPATH' in os.environ: try: drv = os.environ['HOMEDRIVE'] except KeyError: drv = '' userhome = drv + os.environ['HOMEPATH'] else: raise RuntimeError("Can't determine home directory") if username: # Try to guess user home directory. By default all users # directories are located in the same place and are named by # corresponding usernames. If current user home directory points # to nonstandard place, this guess is likely wrong. 
if os.environ['USERNAME'] != username: drv, root, parts = self.parse_parts((userhome,)) if parts[-1] != os.environ['USERNAME']: raise RuntimeError("Can't determine home directory " "for %r" % username) parts[-1] = username if drv or root: userhome = drv + root + self.join(parts[1:]) else: userhome = self.join(parts) return userhome class _PosixFlavour(_Flavour): sep = '/' altsep = '' has_drv = False pathmod = posixpath is_supported = (os.name != 'nt') def splitroot(self, part, sep=sep): if part and part[0] == sep: stripped_part = part.lstrip(sep) # According to POSIX path resolution: # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11 # "A pathname that begins with two successive slashes may be # interpreted in an implementation-defined manner, although more # than two leading slashes shall be treated as a single slash". if len(part) - len(stripped_part) == 2: return '', sep * 2, stripped_part else: return '', sep, stripped_part else: return '', '', part def casefold(self, s): return s def casefold_parts(self, parts): return parts def resolve(self, path, strict=False): sep = self.sep accessor = path._accessor seen = {} def _resolve(path, rest): if rest.startswith(sep): path = '' for name in rest.split(sep): if not name or name == '.': # current dir continue if name == '..': # parent dir path, _, _ = path.rpartition(sep) continue newpath = path + sep + name if newpath in seen: # Already seen this path path = seen[newpath] if path is not None: # use cached value continue # The symlink is not resolved, so we must have a symlink loop. raise RuntimeError("Symlink loop from %r" % newpath) # Resolve the symbolic link try: target = accessor.readlink(newpath) except OSError as e: if e.errno != EINVAL: if strict: raise else: return newpath # Not a symlink path = newpath else: seen[newpath] = None # not resolved symlink path = _resolve(path, target) seen[newpath] = path # resolved symlink return path # NOTE: according to POSIX, getcwd() cannot contain path components # which are symlinks. base = '' if path.is_absolute() else os.getcwd() return _resolve(base, str(path)) or sep def is_reserved(self, parts): return False def make_uri(self, path): # We represent the path using the local filesystem encoding, # for portability to other applications. 
bpath = bytes(path) return 'file://' + urlquote_from_bytes(bpath) def gethomedir(self, username): if not username: try: return os.environ['HOME'] except KeyError: import pwd return pwd.getpwuid(os.getuid()).pw_dir else: import pwd try: return pwd.getpwnam(username).pw_dir except KeyError: raise RuntimeError("Can't determine home directory " "for %r" % username) _windows_flavour = _WindowsFlavour() _posix_flavour = _PosixFlavour() class _Accessor: """An accessor implements a particular (system-specific or not) way of accessing paths on the filesystem.""" class _NormalAccessor(_Accessor): def _wrap_strfunc(strfunc): @functools.wraps(strfunc) def wrapped(pathobj, *args): return strfunc(str(pathobj), *args) return staticmethod(wrapped) def _wrap_binary_strfunc(strfunc): @functools.wraps(strfunc) def wrapped(pathobjA, pathobjB, *args): return strfunc(str(pathobjA), str(pathobjB), *args) return staticmethod(wrapped) stat = _wrap_strfunc(os.stat) lstat = _wrap_strfunc(os.lstat) open = _wrap_strfunc(os.open) listdir = _wrap_strfunc(os.listdir) scandir = _wrap_strfunc(os.scandir) chmod = _wrap_strfunc(os.chmod) if hasattr(os, "lchmod"): lchmod = _wrap_strfunc(os.lchmod) else: def lchmod(self, pathobj, mode): raise NotImplementedError("lchmod() not available on this system") mkdir = _wrap_strfunc(os.mkdir) unlink = _wrap_strfunc(os.unlink) rmdir = _wrap_strfunc(os.rmdir) rename = _wrap_binary_strfunc(os.rename) replace = _wrap_binary_strfunc(os.replace) if nt: if supports_symlinks: symlink = _wrap_binary_strfunc(os.symlink) else: def symlink(a, b, target_is_directory): raise NotImplementedError("symlink() not available on this system") else: # Under POSIX, os.symlink() takes two args @staticmethod def symlink(a, b, target_is_directory): return os.symlink(str(a), str(b)) utime = _wrap_strfunc(os.utime) # Helper for resolve() def readlink(self, path): return os.readlink(path) _normal_accessor = _NormalAccessor() # # Globbing helpers # def _make_selector(pattern_parts): pat = pattern_parts[0] child_parts = pattern_parts[1:] if pat == '**': cls = _RecursiveWildcardSelector elif '**' in pat: raise ValueError("Invalid pattern: '**' can only be an entire path component") elif _is_wildcard_pattern(pat): cls = _WildcardSelector else: cls = _PreciseSelector return cls(pat, child_parts) if hasattr(functools, "lru_cache"): _make_selector = functools.lru_cache()(_make_selector) class _Selector: """A selector matches a specific glob pattern part against the children of a given path.""" def __init__(self, child_parts): self.child_parts = child_parts if child_parts: self.successor = _make_selector(child_parts) self.dironly = True else: self.successor = _TerminatingSelector() self.dironly = False def select_from(self, parent_path): """Iterate over all child paths of `parent_path` matched by this selector. 
This can contain parent_path itself.""" path_cls = type(parent_path) is_dir = path_cls.is_dir exists = path_cls.exists scandir = parent_path._accessor.scandir if not is_dir(parent_path): return iter([]) return self._select_from(parent_path, is_dir, exists, scandir) class _TerminatingSelector: def _select_from(self, parent_path, is_dir, exists, scandir): yield parent_path class _PreciseSelector(_Selector): def __init__(self, name, child_parts): self.name = name _Selector.__init__(self, child_parts) def _select_from(self, parent_path, is_dir, exists, scandir): try: path = parent_path._make_child_relpath(self.name) if (is_dir if self.dironly else exists)(path): for p in self.successor._select_from(path, is_dir, exists, scandir): yield p except PermissionError: return class _WildcardSelector(_Selector): def __init__(self, pat, child_parts): self.pat = re.compile(fnmatch.translate(pat)) _Selector.__init__(self, child_parts) def _select_from(self, parent_path, is_dir, exists, scandir): try: cf = parent_path._flavour.casefold entries = list(scandir(parent_path)) for entry in entries: if not self.dironly or entry.is_dir(): name = entry.name casefolded = cf(name) if self.pat.match(casefolded): path = parent_path._make_child_relpath(name) for p in self.successor._select_from(path, is_dir, exists, scandir): yield p except PermissionError: return class _RecursiveWildcardSelector(_Selector): def __init__(self, pat, child_parts): _Selector.__init__(self, child_parts) def _iterate_directories(self, parent_path, is_dir, scandir): yield parent_path try: entries = list(scandir(parent_path)) for entry in entries: if entry.is_dir() and not entry.is_symlink(): path = parent_path._make_child_relpath(entry.name) for p in self._iterate_directories(path, is_dir, scandir): yield p except PermissionError: return def _select_from(self, parent_path, is_dir, exists, scandir): try: yielded = set() try: successor_select = self.successor._select_from for starting_point in self._iterate_directories(parent_path, is_dir, scandir): for p in successor_select(starting_point, is_dir, exists, scandir): if p not in yielded: yield p yielded.add(p) finally: yielded.clear() except PermissionError: return # # Public API # class _PathParents(Sequence): """This object provides sequence-like access to the logical ancestors of a path. Don't try to construct it yourself.""" __slots__ = ('_pathcls', '_drv', '_root', '_parts') def __init__(self, path): # We don't store the instance to avoid reference cycles self._pathcls = type(path) self._drv = path._drv self._root = path._root self._parts = path._parts def __len__(self): if self._drv or self._root: return len(self._parts) - 1 else: return len(self._parts) def __getitem__(self, idx): if idx < 0 or idx >= len(self): raise IndexError(idx) return self._pathcls._from_parsed_parts(self._drv, self._root, self._parts[:-idx - 1]) def __repr__(self): return "<{}.parents>".format(self._pathcls.__name__) class PurePath(object): """PurePath represents a filesystem path and offers operations which don't imply any actual filesystem I/O. Depending on your system, instantiating a PurePath will return either a PurePosixPath or a PureWindowsPath object. You can also instantiate either of these classes directly, regardless of your system. """ __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) def __new__(cls, *args): """Construct a PurePath from one or several strings and or existing PurePath objects. 
The strings and path objects are combined so as to yield a canonicalized path, which is incorporated into the new PurePath object. """ if cls is PurePath: cls = PureWindowsPath if os.name == 'nt' else PurePosixPath return cls._from_parts(args) def __reduce__(self): # Using the parts tuple helps share interned path parts # when pickling related paths. return (self.__class__, tuple(self._parts)) @classmethod def _parse_args(cls, args): # This is useful when you don't want to create an instance, just # canonicalize some constructor arguments. parts = [] for a in args: if isinstance(a, PurePath): parts += a._parts else: a = os.fspath(a) if isinstance(a, str): # Force-cast str subclasses to str (issue #21127) parts.append(str(a)) else: raise TypeError( "argument should be a str object or an os.PathLike " "object returning str, not %r" % type(a)) return cls._flavour.parse_parts(parts) @classmethod def _from_parts(cls, args, init=True): # We need to call _parse_args on the instance, so as to get the # right flavour. self = object.__new__(cls) drv, root, parts = self._parse_args(args) self._drv = drv self._root = root self._parts = parts if init: self._init() return self @classmethod def _from_parsed_parts(cls, drv, root, parts, init=True): self = object.__new__(cls) self._drv = drv self._root = root self._parts = parts if init: self._init() return self @classmethod def _format_parsed_parts(cls, drv, root, parts): if drv or root: return drv + root + cls._flavour.join(parts[1:]) else: return cls._flavour.join(parts) def _init(self): # Overridden in concrete Path pass def _make_child(self, args): drv, root, parts = self._parse_args(args) drv, root, parts = self._flavour.join_parsed_parts( self._drv, self._root, self._parts, drv, root, parts) return self._from_parsed_parts(drv, root, parts) def __str__(self): """Return the string representation of the path, suitable for passing to system calls.""" try: return self._str except AttributeError: self._str = self._format_parsed_parts(self._drv, self._root, self._parts) or '.' return self._str def __fspath__(self): return str(self) def as_posix(self): """Return the string representation of the path with forward (/) slashes.""" f = self._flavour return str(self).replace(f.sep, '/') def __bytes__(self): """Return the bytes representation of the path. 
This is only recommended to use under Unix.""" return os.fsencode(str(self)) def __repr__(self): return "{}({!r})".format(self.__class__.__name__, self.as_posix()) def as_uri(self): """Return the path as a 'file' URI.""" if not self.is_absolute(): raise ValueError("relative path can't be expressed as a file URI") return self._flavour.make_uri(self) @property def _cparts(self): # Cached casefolded parts, for hashing and comparison try: return self._cached_cparts except AttributeError: self._cached_cparts = self._flavour.casefold_parts(self._parts) return self._cached_cparts def __eq__(self, other): if not isinstance(other, PurePath): return NotImplemented return self._cparts == other._cparts and self._flavour is other._flavour def __hash__(self): try: return self._hash except AttributeError: self._hash = hash(tuple(self._cparts)) return self._hash def __lt__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts < other._cparts def __le__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts <= other._cparts def __gt__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts > other._cparts def __ge__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts >= other._cparts drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") @property def anchor(self): """The concatenation of the drive and root, or ''.""" anchor = self._drv + self._root return anchor @property def name(self): """The final path component, if any.""" parts = self._parts if len(parts) == (1 if (self._drv or self._root) else 0): return '' return parts[-1] @property def suffix(self): """The final component's last suffix, if any.""" name = self.name i = name.rfind('.') if 0 < i < len(name) - 1: return name[i:] else: return '' @property def suffixes(self): """A list of the final component's suffixes, if any.""" name = self.name if name.endswith('.'): return [] name = name.lstrip('.') return ['.' + suffix for suffix in name.split('.')[1:]] @property def stem(self): """The final path component, minus its last suffix.""" name = self.name i = name.rfind('.') if 0 < i < len(name) - 1: return name[:i] else: return name def with_name(self, name): """Return a new path with the file name changed.""" if not self.name: raise ValueError("%r has an empty name" % (self,)) drv, root, parts = self._flavour.parse_parts((name,)) if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep] or drv or root or len(parts) != 1): raise ValueError("Invalid name %r" % (name)) return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]) def with_suffix(self, suffix): """Return a new path with the file suffix changed (or added, if none).""" # XXX if suffix is None, should the current suffix be removed? 
f = self._flavour if f.sep in suffix or f.altsep and f.altsep in suffix: raise ValueError("Invalid suffix %r" % (suffix)) if suffix and not suffix.startswith('.') or suffix == '.': raise ValueError("Invalid suffix %r" % (suffix)) name = self.name if not name: raise ValueError("%r has an empty name" % (self,)) old_suffix = self.suffix if not old_suffix: name = name + suffix else: name = name[:-len(old_suffix)] + suffix return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]) def relative_to(self, *other): """Return the relative path to another path identified by the passed arguments. If the operation is not possible (because this is not a subpath of the other path), raise ValueError. """ # For the purpose of this method, drive and root are considered # separate parts, i.e.: # Path('c:/').relative_to('c:') gives Path('/') # Path('c:/').relative_to('/') raise ValueError if not other: raise TypeError("need at least one argument") parts = self._parts drv = self._drv root = self._root if root: abs_parts = [drv, root] + parts[1:] else: abs_parts = parts to_drv, to_root, to_parts = self._parse_args(other) if to_root: to_abs_parts = [to_drv, to_root] + to_parts[1:] else: to_abs_parts = to_parts n = len(to_abs_parts) cf = self._flavour.casefold_parts if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts): formatted = self._format_parsed_parts(to_drv, to_root, to_parts) raise ValueError("{!r} does not start with {!r}" .format(str(self), str(formatted))) return self._from_parsed_parts('', root if n == 1 else '', abs_parts[n:]) @property def parts(self): """An object providing sequence-like access to the components in the filesystem path.""" # We cache the tuple to avoid building a new one each time .parts # is accessed. XXX is this necessary? try: return self._pparts except AttributeError: self._pparts = tuple(self._parts) return self._pparts def joinpath(self, *args): """Combine this path with one or several arguments, and return a new path representing either a subpath (if all arguments are relative paths) or a totally different path (if one of the arguments is anchored). """ return self._make_child(args) def __truediv__(self, key): return self._make_child((key,)) def __rtruediv__(self, key): return self._from_parts([key] + self._parts) @property def parent(self): """The logical parent of the path.""" drv = self._drv root = self._root parts = self._parts if len(parts) == 1 and (drv or root): return self return self._from_parsed_parts(drv, root, parts[:-1]) @property def parents(self): """A sequence of this path's logical parents.""" return _PathParents(self) def is_absolute(self): """True if the path is absolute (has both a root and, if applicable, a drive).""" if not self._root: return False return not self._flavour.has_drv or bool(self._drv) def is_reserved(self): """Return True if the path contains one of the special names reserved by the system, if any.""" return self._flavour.is_reserved(self._parts) def match(self, path_pattern): """ Return True if this path matches the given pattern. 
""" cf = self._flavour.casefold path_pattern = cf(path_pattern) drv, root, pat_parts = self._flavour.parse_parts((path_pattern,)) if not pat_parts: raise ValueError("empty pattern") if drv and drv != cf(self._drv): return False if root and root != cf(self._root): return False parts = self._cparts if drv or root: if len(pat_parts) != len(parts): return False pat_parts = pat_parts[1:] elif len(pat_parts) > len(parts): return False for part, pat in zip(reversed(parts), reversed(pat_parts)): if not fnmatch.fnmatchcase(part, pat): return False return True # Can't subclass os.PathLike from PurePath and keep the constructor # optimizations in PurePath._parse_args(). os.PathLike.register(PurePath) class PurePosixPath(PurePath): _flavour = _posix_flavour __slots__ = () class PureWindowsPath(PurePath): _flavour = _windows_flavour __slots__ = () # Filesystem-accessing classes class Path(PurePath): __slots__ = ( '_accessor', '_closed', ) def __new__(cls, *args, **kwargs): if cls is Path: cls = WindowsPath if os.name == 'nt' else PosixPath self = cls._from_parts(args, init=False) if not self._flavour.is_supported: raise NotImplementedError("cannot instantiate %r on your system" % (cls.__name__,)) self._init() return self def _init(self, # Private non-constructor arguments template=None, ): self._closed = False if template is not None: self._accessor = template._accessor else: self._accessor = _normal_accessor def _make_child_relpath(self, part): # This is an optimization used for dir walking. `part` must be # a single part relative to this path. parts = self._parts + [part] return self._from_parsed_parts(self._drv, self._root, parts) def __enter__(self): if self._closed: self._raise_closed() return self def __exit__(self, t, v, tb): self._closed = True def _raise_closed(self): raise ValueError("I/O operation on closed path") def _opener(self, name, flags, mode=0o666): # A stub for the opener argument to built-in open() return self._accessor.open(self, flags, mode) def _raw_open(self, flags, mode=0o777): """ Open the file pointed by this path and return a file descriptor, as os.open() does. """ if self._closed: self._raise_closed() return self._accessor.open(self, flags, mode) # Public API @classmethod def cwd(cls): """Return a new path pointing to the current working directory (as returned by os.getcwd()). """ return cls(os.getcwd()) @classmethod def home(cls): """Return a new path pointing to the user's home directory (as returned by os.path.expanduser('~')). """ return cls(cls()._flavour.gethomedir(None)) def samefile(self, other_path): """Return whether other_path is the same or not as this file (as returned by os.path.samefile()). """ st = self.stat() try: other_st = other_path.stat() except AttributeError: other_st = os.stat(other_path) return os.path.samestat(st, other_st) def iterdir(self): """Iterate over the files in this directory. Does not yield any result for the special paths '.' and '..'. """ if self._closed: self._raise_closed() for name in self._accessor.listdir(self): if name in {'.', '..'}: # Yielding a path object for these makes little sense continue yield self._make_child_relpath(name) if self._closed: self._raise_closed() def glob(self, pattern): """Iterate over this subtree and yield all existing files (of any kind, including directories) matching the given pattern. 
""" if not pattern: raise ValueError("Unacceptable pattern: {!r}".format(pattern)) pattern = self._flavour.casefold(pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") selector = _make_selector(tuple(pattern_parts)) for p in selector.select_from(self): yield p def rglob(self, pattern): """Recursively yield all existing files (of any kind, including directories) matching the given pattern, anywhere in this subtree. """ pattern = self._flavour.casefold(pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") selector = _make_selector(("**",) + tuple(pattern_parts)) for p in selector.select_from(self): yield p def absolute(self): """Return an absolute version of this path. This function works even if the path doesn't point to anything. No normalization is done, i.e. all '.' and '..' will be kept along. Use resolve() to get the canonical path to a file. """ # XXX untested yet! if self._closed: self._raise_closed() if self.is_absolute(): return self # FIXME this must defer to the specific flavour (and, under Windows, # use nt._getfullpathname()) obj = self._from_parts([os.getcwd()] + self._parts, init=False) obj._init(template=self) return obj def resolve(self, strict=False): """ Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). """ if self._closed: self._raise_closed() s = self._flavour.resolve(self, strict=strict) if s is None: # No symlink resolution => for consistency, raise an error if # the path doesn't exist or is forbidden self.stat() s = str(self.absolute()) # Now we have no symlinks in the path, it's safe to normalize it. normed = self._flavour.pathmod.normpath(s) obj = self._from_parts((normed,), init=False) obj._init(template=self) return obj def stat(self): """ Return the result of the stat() system call on this path, like os.stat() does. """ return self._accessor.stat(self) def owner(self): """ Return the login name of the file owner. """ import pwd return pwd.getpwuid(self.stat().st_uid).pw_name def group(self): """ Return the group name of the file gid. """ import grp return grp.getgrgid(self.stat().st_gid).gr_name def open(self, mode='r', buffering=-1, encoding=None, errors=None, newline=None): """ Open the file pointed by this path and return a file object, as the built-in open() function does. """ if self._closed: self._raise_closed() return io.open(str(self), mode, buffering, encoding, errors, newline, opener=self._opener) def read_bytes(self): """ Open the file in bytes mode, read it, and close the file. """ with self.open(mode='rb') as f: return f.read() def read_text(self, encoding=None, errors=None): """ Open the file in text mode, read it, and close the file. """ with self.open(mode='r', encoding=encoding, errors=errors) as f: return f.read() def write_bytes(self, data): """ Open the file in bytes mode, write to it, and close the file. """ # type-check for the buffer interface before truncating the file view = memoryview(data) with self.open(mode='wb') as f: return f.write(view) def write_text(self, data, encoding=None, errors=None): """ Open the file in text mode, write to it, and close the file. 
""" if not isinstance(data, str): raise TypeError('data must be str, not %s' % data.__class__.__name__) with self.open(mode='w', encoding=encoding, errors=errors) as f: return f.write(data) def touch(self, mode=0o666, exist_ok=True): """ Create this file with the given access mode, if it doesn't exist. """ if self._closed: self._raise_closed() if exist_ok: # First try to bump modification time # Implementation note: GNU touch uses the UTIME_NOW option of # the utimensat() / futimens() functions. try: self._accessor.utime(self, None) except OSError: # Avoid exception chaining pass else: return flags = os.O_CREAT | os.O_WRONLY if not exist_ok: flags |= os.O_EXCL fd = self._raw_open(flags, mode) os.close(fd) def mkdir(self, mode=0o777, parents=False, exist_ok=False): if self._closed: self._raise_closed() if not parents: try: self._accessor.mkdir(self, mode) except FileExistsError: if not exist_ok or not self.is_dir(): raise else: try: self._accessor.mkdir(self, mode) except FileExistsError: if not exist_ok or not self.is_dir(): raise except OSError as e: if e.errno != ENOENT: raise self.parent.mkdir(parents=True) self._accessor.mkdir(self, mode) def chmod(self, mode): """ Change the permissions of the path, like os.chmod(). """ if self._closed: self._raise_closed() self._accessor.chmod(self, mode) def lchmod(self, mode): """ Like chmod(), except if the path points to a symlink, the symlink's permissions are changed, rather than its target's. """ if self._closed: self._raise_closed() self._accessor.lchmod(self, mode) def unlink(self): """ Remove this file or link. If the path is a directory, use rmdir() instead. """ if self._closed: self._raise_closed() self._accessor.unlink(self) def rmdir(self): """ Remove this directory. The directory must be empty. """ if self._closed: self._raise_closed() self._accessor.rmdir(self) def lstat(self): """ Like stat(), except if the path points to a symlink, the symlink's status information is returned, rather than its target's. """ if self._closed: self._raise_closed() return self._accessor.lstat(self) def rename(self, target): """ Rename this path to the given path. """ if self._closed: self._raise_closed() self._accessor.rename(self, target) def replace(self, target): """ Rename this path to the given path, clobbering the existing destination if it exists. """ if self._closed: self._raise_closed() self._accessor.replace(self, target) def symlink_to(self, target, target_is_directory=False): """ Make this path a symlink pointing to the given path. Note the order of arguments (self, target) is the reverse of os.symlink's. """ if self._closed: self._raise_closed() self._accessor.symlink(target, self, target_is_directory) # Convenience functions for querying the stat results def exists(self): """ Whether this path exists. """ try: self.stat() except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise return False return True def is_dir(self): """ Whether this path is a directory. """ try: return S_ISDIR(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_file(self): """ Whether this path is a regular file (also True for symlinks pointing to regular files). 
""" try: return S_ISREG(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_symlink(self): """ Whether this path is a symbolic link. """ try: return S_ISLNK(self.lstat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist return False def is_block_device(self): """ Whether this path is a block device. """ try: return S_ISBLK(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_char_device(self): """ Whether this path is a character device. """ try: return S_ISCHR(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_fifo(self): """ Whether this path is a FIFO. """ try: return S_ISFIFO(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_socket(self): """ Whether this path is a socket. """ try: return S_ISSOCK(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def expanduser(self): """ Return a new path with expanded ~ and ~user constructs (as returned by os.path.expanduser) """ if (not (self._drv or self._root) and self._parts and self._parts[0][:1] == '~'): homedir = self._flavour.gethomedir(self._parts[0][1:]) return self._from_parts([homedir] + self._parts[1:]) return self class PosixPath(Path, PurePosixPath): __slots__ = () class WindowsPath(Path, PureWindowsPath): __slots__ = () def owner(self): raise NotImplementedError("Path.owner() is unsupported on this system") def group(self): raise NotImplementedError("Path.group() is unsupported on this system")
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging from django.core.urlresolvers import reverse from django.template.defaultfilters import register # noqa from django.utils import html from django.utils import safestring import six import six.moves.urllib.parse as urlparse from openstack_dashboard.api import swift LOG = logging.getLogger(__name__) resource_urls = { "AWS::AutoScaling::AutoScalingGroup": { 'link': 'horizon:project:stacks:detail'}, "AWS::CloudFormation::Stack": { 'link': 'horizon:project:stacks:detail'}, "AWS::EC2::Instance": { 'link': 'horizon:project:instances:detail'}, "AWS::EC2::InternetGateway": { 'link': 'horizon:project:networks:ports:detail'}, "AWS::EC2::NetworkInterface": { 'link': 'horizon:project:networks:ports:detail'}, "AWS::EC2::RouteTable": { 'link': 'horizon:project:routers:detail'}, "AWS::EC2::SecurityGroup": { 'link': 'horizon:project:access_and_security:index'}, "AWS::EC2::Subnet": { 'link': 'horizon:project:networks:subnets:detail'}, "AWS::EC2::Volume": { 'link': 'horizon:project:volumes:volumes:detail'}, "AWS::EC2::VPC": { 'link': 'horizon:project:networks:detail'}, "AWS::S3::Bucket": { 'link': 'horizon:project:containers:index'}, "OS::Cinder::Volume": { 'link': 'horizon:project:volumes:volumes:detail'}, "OS::Heat::AccessPolicy": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::AutoScalingGroup": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::CloudConfig": { 'link': 'horizon:project:stacks:detail'}, "OS::Neutron::Firewall": { 'link': 'horizon:project:firewalls:firewalldetails'}, "OS::Neutron::FirewallPolicy": { 'link': 'horizon:project:firewalls:policydetails'}, "OS::Neutron::FirewallRule": { 'link': 'horizon:project:firewalls:ruledetails'}, "OS::Heat::HARestarter": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::InstanceGroup": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::MultipartMime": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::ResourceGroup": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::SoftwareConfig": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::StructuredConfig": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::StructuredDeployment": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::Stack": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::WaitCondition": { 'link': 'horizon:project:stacks:detail'}, "OS::Heat::WaitConditionHandle": { 'link': 'horizon:project:stacks:detail'}, "OS::Neutron::HealthMonitor": { 'link': 'horizon:project:loadbalancers:monitordetails'}, "OS::Neutron::IKEPolicy": { 'link': 'horizon:project:vpn:ikepolicydetails'}, "OS::Neutron::IPsecPolicy": { 'link': 'horizon:project:vpn:ipsecpolicydetails'}, "OS::Neutron::IPsecSiteConnection": { 'link': 'horizon:project:vpn:ipsecsiteconnectiondetails'}, "OS::Neutron::Net": { 'link': 'horizon:project:networks:detail'}, "OS::Neutron::Pool": { 'link': 'horizon:project:loadbalancers:pooldetails'}, "OS::Neutron::PoolMember": { 'link': 'horizon:project:loadbalancers:memberdetails'}, "OS::Neutron::Port": { 'link': 
'horizon:project:networks:ports:detail'}, "OS::Neutron::Router": { 'link': 'horizon:project:routers:detail'}, "OS::Neutron::Subnet": { 'link': 'horizon:project:networks:subnets:detail'}, "OS::Neutron::VPNService": { 'link': 'horizon:project:vpn:vpnservicedetails'}, "OS::Nova::KeyPair": { 'link': 'horizon:project:access_and_security:index'}, "OS::Nova::Server": { 'link': 'horizon:project:instances:detail'}, "OS::Swift::Container": { 'link': 'horizon:project:containers:index', 'format_pattern': '%s' + swift.FOLDER_DELIMITER}, } def resource_to_url(resource): if not resource or not resource.physical_resource_id: return None mapping = resource_urls.get(resource.resource_type, {}) try: if 'link' not in mapping: return None format_pattern = mapping.get('format_pattern') or '%s' rid = format_pattern % resource.physical_resource_id url = reverse(mapping['link'], args=(rid,)) except Exception as e: LOG.exception(e) return None return url @register.filter def stack_output(output): if not output: return u'' if isinstance(output, dict) or isinstance(output, list): json_string = json.dumps(output, indent=2) safe_output = u'<pre>%s</pre>' % html.escape(json_string) return safestring.mark_safe(safe_output) if isinstance(output, basestring): parts = urlparse.urlsplit(output) if parts.netloc and parts.scheme in ('http', 'https'): url = html.escape(output) safe_link = u'<a href="%s" target="_blank">%s</a>' % (url, url) return safestring.mark_safe(safe_link) return unicode(output) resource_images = { 'LB_FAILED': '/static/dashboard/img/lb-red.svg', 'LB_DELETE': '/static/dashboard/img/lb-red.svg', 'LB_IN_PROGRESS': '/static/dashboard/img/lb-gray.gif', 'LB_INIT': '/static/dashboard/img/lb-gray.svg', 'LB_COMPLETE': '/static/dashboard/img/lb-green.svg', 'DB_FAILED': '/static/dashboard/img/db-red.svg', 'DB_DELETE': '/static/dashboard/img/db-red.svg', 'DB_IN_PROGRESS': '/static/dashboard/img/db-gray.gif', 'DB_INIT': '/static/dashboard/img/db-gray.svg', 'DB_COMPLETE': '/static/dashboard/img/db-green.svg', 'STACK_FAILED': '/static/dashboard/img/stack-red.svg', 'STACK_DELETE': '/static/dashboard/img/stack-red.svg', 'STACK_IN_PROGRESS': '/static/dashboard/img/stack-gray.gif', 'STACK_INIT': '/static/dashboard/img/stack-gray.svg', 'STACK_COMPLETE': '/static/dashboard/img/stack-green.svg', 'SERVER_FAILED': '/static/dashboard/img/server-red.svg', 'SERVER_DELETE': '/static/dashboard/img/server-red.svg', 'SERVER_IN_PROGRESS': '/static/dashboard/img/server-gray.gif', 'SERVER_INIT': '/static/dashboard/img/server-gray.svg', 'SERVER_COMPLETE': '/static/dashboard/img/server-green.svg', 'ALARM_FAILED': '/static/dashboard/img/alarm-red.svg', 'ALARM_DELETE': '/static/dashboard/img/alarm-red.svg', 'ALARM_IN_PROGRESS': '/static/dashboard/img/alarm-gray.gif', 'ALARM_INIT': '/static/dashboard/img/alarm-gray.svg', 'ALARM_COMPLETE': '/static/dashboard/img/alarm-green.svg', 'VOLUME_FAILED': '/static/dashboard/img/volume-red.svg', 'VOLUME_DELETE': '/static/dashboard/img/volume-red.svg', 'VOLUME_IN_PROGRESS': '/static/dashboard/img/volume-gray.gif', 'VOLUME_INIT': '/static/dashboard/img/volume-gray.svg', 'VOLUME_COMPLETE': '/static/dashboard/img/volume-green.svg', 'IMAGE_FAILED': '/static/dashboard/img/image-red.svg', 'IMAGE_DELETE': '/static/dashboard/img/image-red.svg', 'IMAGE_IN_PROGRESS': '/static/dashboard/img/image-gray.gif', 'IMAGE_INIT': '/static/dashboard/img/image-gray.svg', 'IMAGE_COMPLETE': '/static/dashboard/img/image-green.svg', 'WAIT_FAILED': '/static/dashboard/img/wait-red.svg', 'WAIT_DELETE': 
'/static/dashboard/img/wait-red.svg', 'WAIT_IN_PROGRESS': '/static/dashboard/img/wait-gray.gif', 'WAIT_INIT': '/static/dashboard/img/wait-gray.svg', 'WAIT_COMPLETE': '/static/dashboard/img/wait-green.svg', 'FIREWALL_FAILED': '/static/dashboard/img/firewall-red.svg', 'FIREWALL_DELETE': '/static/dashboard/img/firewall-red.svg', 'FIREWALL_IN_PROGRESS': '/static/dashboard/img/firewall-gray.gif', 'FIREWALL_INIT': '/static/dashboard/img/firewall-gray.svg', 'FIREWALL_COMPLETE': '/static/dashboard/img/firewall-green.svg', 'FLOATINGIP_FAILED': '/static/dashboard/img/floatingip-red.svg', 'FLOATINGIP_DELETE': '/static/dashboard/img/floatingip-red.svg', 'FLOATINGIP_IN_PROGRESS': '/static/dashboard/img/floatingip-gray.gif', 'FLOATINGIP_INIT': '/static/dashboard/img/floatingip-gray.svg', 'FLOATINGIP_COMPLETE': '/static/dashboard/img/floatingip-green.svg', 'ROUTER_FAILED': '/static/dashboard/img/router-red.svg', 'ROUTER_DELETE': '/static/dashboard/img/router-red.svg', 'ROUTER_IN_PROGRESS': '/static/dashboard/img/router-gray.gif', 'ROUTER_INIT': '/static/dashboard/img/router-gray.svg', 'ROUTER_COMPLETE': '/static/dashboard/img/router-green.svg', 'POLICY_FAILED': '/static/dashboard/img/policy-red.svg', 'POLICY_DELETE': '/static/dashboard/img/policy-red.svg', 'POLICY_IN_PROGRESS': '/static/dashboard/img/policy-gray.gif', 'POLICY_INIT': '/static/dashboard/img/policy-gray.svg', 'POLICY_COMPLETE': '/static/dashboard/img/policy-green.svg', 'CONFIG_FAILED': '/static/dashboard/img/config-red.svg', 'CONFIG_DELETE': '/static/dashboard/img/config-red.svg', 'CONFIG_IN_PROGRESS': '/static/dashboard/img/config-gray.gif', 'CONFIG_INIT': '/static/dashboard/img/config-gray.svg', 'CONFIG_COMPLETE': '/static/dashboard/img/config-green.svg', 'NETWORK_FAILED': '/static/dashboard/img/network-red.svg', 'NETWORK_DELETE': '/static/dashboard/img/network-red.svg', 'NETWORK_IN_PROGRESS': '/static/dashboard/img/network-gray.gif', 'NETWORK_INIT': '/static/dashboard/img/network-gray.svg', 'NETWORK_COMPLETE': '/static/dashboard/img/network-green.svg', 'PORT_FAILED': '/static/dashboard/img/port-red.svg', 'PORT_DELETE': '/static/dashboard/img/port-red.svg', 'PORT_IN_PROGRESS': '/static/dashboard/img/port-gray.gif', 'PORT_INIT': '/static/dashboard/img/port-gray.svg', 'PORT_COMPLETE': '/static/dashboard/img/port-green.svg', 'SECURITYGROUP_FAILED': '/static/dashboard/img/securitygroup-red.svg', 'SECURITYGROUP_DELETE': '/static/dashboard/img/securitygroup-red.svg', 'SECURITYGROUP_IN_PROGRESS': '/static/dashboard/img/securitygroup-gray.gif', 'SECURITYGROUP_INIT': '/static/dashboard/img/securitygroup-gray.svg', 'SECURITYGROUP_COMPLETE': '/static/dashboard/img/securitygroup-green.svg', 'VPN_FAILED': '/static/dashboard/img/vpn-red.svg', 'VPN_DELETE': '/static/dashboard/img/vpn-red.svg', 'VPN_IN_PROGRESS': '/static/dashboard/img/vpn-gray.gif', 'VPN_INIT': '/static/dashboard/img/vpn-gray.svg', 'VPN_COMPLETE': '/static/dashboard/img/vpn-green.svg', 'FLAVOR_FAILED': '/static/dashboard/img/flavor-red.svg', 'FLAVOR_DELETE': '/static/dashboard/img/flavor-red.svg', 'FLAVOR_IN_PROGRESS': '/static/dashboard/img/flavor-gray.gif', 'FLAVOR_INIT': '/static/dashboard/img/flavor-gray.svg', 'FLAVOR_COMPLETE': '/static/dashboard/img/flavor-green.svg', 'KEYPAIR_FAILED': '/static/dashboard/img/keypair-red.svg', 'KEYPAIR_DELETE': '/static/dashboard/img/keypair-red.svg', 'KEYPAIR_IN_PROGRESS': '/static/dashboard/img/keypair-gray.gif', 'KEYPAIR_INIT': '/static/dashboard/img/keypair-gray.svg', 'KEYPAIR_COMPLETE': '/static/dashboard/img/keypair-green.svg', 
    'UNKNOWN_FAILED': '/static/dashboard/img/unknown-red.svg',
    'UNKNOWN_DELETE': '/static/dashboard/img/unknown-red.svg',
    'UNKNOWN_IN_PROGRESS': '/static/dashboard/img/unknown-gray.gif',
    'UNKNOWN_INIT': '/static/dashboard/img/unknown-gray.svg',
    'UNKNOWN_COMPLETE': '/static/dashboard/img/unknown-green.svg',
}

resource_types = {
    # LB
    'LoadBalance': 'LB',
    'HealthMonitor': 'LB',
    'PoolMember': 'LB',
    'Pool': 'LB',
    # DB
    'DBInstance': 'DB',
    'Database': 'DB',
    # SERVER
    'Instance': 'SERVER',
    'Server': 'SERVER',
    # ALARM
    'Alarm': 'ALARM',
    'CombinationAlarm': 'ALARM',
    'CWLiteAlarm': 'ALARM',
    # VOLUME
    'Volume': 'VOLUME',
    'VolumeAttachment': 'VOLUME',
    # STACK
    'stack': 'STACK',
    'AutoScalingGroup': 'STACK',
    'InstanceGroup': 'STACK',
    'ServerGroup': 'STACK',
    'ResourceGroup': 'STACK',
    # IMAGE
    'Image': 'IMAGE',
    # WAIT
    'WaitCondition': 'WAIT',
    'WaitConditionHandle': 'WAIT',
    'UpdateWaitConditionHandle': 'WAIT',
    # FIREWALL
    'Firewall': 'FIREWALL',
    'FirewallPolicy': 'FIREWALL',
    'FirewallRule': 'FIREWALL',
    # FLOATINGIP
    'FloatingIP': 'FLOATINGIP',
    'FloatingIPAssociation': 'FLOATINGIP',
    # ROUTER
    'Router': 'ROUTER',
    'RouterGateway': 'ROUTER',
    'RouterInterface': 'ROUTER',
    # POLICY
    'ScalingPolicy': 'POLICY',
    # CONFIG
    'CloudConfig': 'CONFIG',
    'MultipartMime': 'CONFIG',
    'SoftwareConfig': 'CONFIG',
    'SoftwareDeployment': 'CONFIG',
    'StructuredConfig': 'CONFIG',
    'StructuredDeployment': 'CONFIG',
    # NETWORK
    'Net': 'NETWORK',
    'Subnet': 'NETWORK',
    'NetworkGateway': 'NETWORK',
    'ProviderNet': 'NETWORK',
    # PORT
    'Port': 'PORT',
    # SECURITYGROUP
    'SecurityGroup': 'SECURITYGROUP',
    # VPN
    'VPNService': 'VPN',
    # FLAVOR
    'Flavor': 'FLAVOR',
    # KEYPAIR
    'KeyPair': 'KEYPAIR',
}


def get_resource_type(type):
    for key, value in six.iteritems(resource_types):
        if key in type:
            return value
    return 'UNKNOWN'


def get_resource_status(status):
    if 'IN_PROGRESS' in status:
        return 'IN_PROGRESS'
    elif 'FAILED' in status:
        return 'FAILED'
    elif 'DELETE' in status:
        return 'DELETE'
    elif 'INIT' in status:
        return 'INIT'
    else:
        return 'COMPLETE'


def get_resource_image(status, type):
    """Return the image URL for a resource, based on its type and status."""
    resource_type = get_resource_type(type)
    resource_status = get_resource_status(status)
    resource_state = resource_type + "_" + resource_status

    for key in resource_images:
        if key == resource_state:
            return resource_images.get(key)
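# --- Illustrative usage sketch (not part of the original Horizon module) ---
# Shows how the helpers above combine: get_resource_type() keys off a
# substring of the Heat resource type, get_resource_status() keys off a
# substring of the status, and get_resource_image() joins the two to index
# resource_images. The resource type/status strings are arbitrary examples.
def _demo_resource_image_lookup():
    # "Server" in "OS::Nova::Server" -> SERVER; "IN_PROGRESS" in the status
    # selects the animated gray icon.
    assert get_resource_image("CREATE_IN_PROGRESS", "OS::Nova::Server") == \
        '/static/dashboard/img/server-gray.gif'
    # Unrecognised types fall back to the UNKNOWN_* images.
    assert get_resource_image("CREATE_COMPLETE", "OS::Example::Widget") == \
        '/static/dashboard/img/unknown-green.svg'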
"""Operator classes for eval. """ import operator as op from functools import partial from datetime import datetime import numpy as np import pandas as pd from pandas.compat import PY3, string_types, text_type import pandas.core.common as com from pandas.core.base import StringMixin from pandas.computation.common import _ensure_decoded, _result_type_many from pandas.computation.scope import _DEFAULT_GLOBALS _reductions = 'sum', 'prod' _mathops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p', 'pow', 'div', 'sqrt', 'inv', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', 'arctan', 'arccosh', 'arcsinh', 'arctanh', 'arctan2', 'abs') _LOCAL_TAG = '__pd_eval_local_' class UndefinedVariableError(NameError): """NameError subclass for local variables.""" def __init__(self, name, is_local): if is_local: msg = 'local variable {0!r} is not defined' else: msg = 'name {0!r} is not defined' super(UndefinedVariableError, self).__init__(msg.format(name)) class Term(StringMixin): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, string_types) else cls supr_new = super(Term, klass).__new__ return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): self._name = name self.env = env self.side = side tname = text_type(name) self.is_local = (tname.startswith(_LOCAL_TAG) or tname in _DEFAULT_GLOBALS) self._value = self._resolve_name() self.encoding = encoding @property def local_name(self): return self.name.replace(_LOCAL_TAG, '') def __unicode__(self): return com.pprint_thing(self.name) def __call__(self, *args, **kwargs): return self.value def evaluate(self, *args, **kwargs): return self def _resolve_name(self): res = self.env.resolve(self.local_name, is_local=self.is_local) self.update(res) if hasattr(res, 'ndim') and res.ndim > 2: raise NotImplementedError("N-dimensional objects, where N > 2," " are not supported with eval") return res def update(self, value): """ search order for local (i.e., @variable) variables: scope, key_variable [('locals', 'local_name'), ('globals', 'local_name'), ('locals', 'key'), ('globals', 'key')] """ key = self.name # if it's a variable name (otherwise a constant) if isinstance(key, string_types): self.env.swapkey(self.local_name, key, new_value=value) self.value = value @property def isscalar(self): return np.isscalar(self._value) @property def type(self): try: # potentially very slow for large, mixed dtype frames return self._value.values.dtype except AttributeError: try: # ndarray return self._value.dtype except AttributeError: # scalar return type(self._value) return_type = type @property def raw(self): return com.pprint_thing('{0}(name={1!r}, type={2})' ''.format(self.__class__.__name__, self.name, self.type)) @property def is_datetime(self): try: t = self.type.type except AttributeError: t = self.type return issubclass(t, (datetime, np.datetime64)) @property def value(self): return self._value @value.setter def value(self, new_value): self._value = new_value @property def name(self): return self._name @name.setter def name(self, new_name): self._name = new_name @property def ndim(self): return self._value.ndim class Constant(Term): def __init__(self, value, env, side=None, encoding=None): super(Constant, self).__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name @property def name(self): return self.value _bool_op_map = {'not': '~', 'and': '&', 'or': '|'} class Op(StringMixin): """Hold an operator of arbitrary arity """ def __init__(self, op, operands, *args, **kwargs): 
self.op = _bool_op_map.get(op, op) self.operands = operands self.encoding = kwargs.get('encoding', None) def __iter__(self): return iter(self.operands) def __unicode__(self): """Print a generic n-ary operator and its operands using infix notation""" # recurse over the operands parened = ('({0})'.format(com.pprint_thing(opr)) for opr in self.operands) return com.pprint_thing(' {0} '.format(self.op).join(parened)) @property def return_type(self): # clobber types to bool if the op is a boolean operator if self.op in (_cmp_ops_syms + _bool_ops_syms): return np.bool_ return _result_type_many(*(term.type for term in com.flatten(self))) @property def has_invalid_return_type(self): types = self.operand_types obj_dtype_set = frozenset([np.dtype('object')]) return self.return_type == object and types - obj_dtype_set @property def operand_types(self): return frozenset(term.type for term in com.flatten(self)) @property def isscalar(self): return all(operand.isscalar for operand in self.operands) @property def is_datetime(self): try: t = self.return_type.type except AttributeError: t = self.return_type return issubclass(t, (datetime, np.datetime64)) def _in(x, y): """Compute the vectorized membership of ``x in y`` if possible, otherwise use Python. """ try: return x.isin(y) except AttributeError: if com.is_list_like(x): try: return y.isin(x) except AttributeError: pass return x in y def _not_in(x, y): """Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python. """ try: return ~x.isin(y) except AttributeError: if com.is_list_like(x): try: return ~y.isin(x) except AttributeError: pass return x not in y _cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in' _cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in _cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs)) _bool_ops_syms = '&', '|', 'and', 'or' _bool_ops_funcs = op.and_, op.or_, op.and_, op.or_ _bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs)) _arith_ops_syms = '+', '-', '*', '/', '**', '//', '%' _arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div, op.pow, op.floordiv, op.mod) _arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs)) _special_case_arith_ops_syms = '**', '//', '%' _special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod _special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms, _special_case_arith_ops_funcs)) _binary_ops_dict = {} for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): _binary_ops_dict.update(d) def _cast_inplace(terms, dtype): """Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. dtype : str or numpy.dtype The dtype to cast to. """ dt = np.dtype(dtype) for term in terms: try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value) def is_term(obj): return isinstance(obj, Term) class BinOp(Op): """Hold a binary operator and its operands Parameters ---------- op : str left : Term or Op right : Term or Op """ def __init__(self, op, lhs, rhs, **kwargs): super(BinOp, self).__init__(op, (lhs, rhs)) self.lhs = lhs self.rhs = rhs self._disallow_scalar_only_bool_ops() self.convert_values() try: self.func = _binary_ops_dict[op] except KeyError: # has to be made a list for python3 keys = list(_binary_ops_dict.keys()) raise ValueError('Invalid binary operator {0!r}, valid' ' operators are {1}'.format(op, keys)) def __call__(self, env): """Recursively evaluate an expression in Python space. 
Parameters ---------- env : Scope Returns ------- object The result of an evaluated expression. """ # handle truediv if self.op == '/' and env.scope['truediv']: self.func = op.truediv # recurse over the left/right nodes left = self.lhs(env) right = self.rhs(env) return self.func(left, right) def evaluate(self, env, engine, parser, term_type, eval_in_python): """Evaluate a binary operation *before* being passed to the engine. Parameters ---------- env : Scope engine : str parser : str term_type : type eval_in_python : list Returns ------- term_type The "pre-evaluated" expression as an instance of ``term_type`` """ if engine == 'python': res = self(env) else: # recurse over the left/right nodes left = self.lhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python) right = self.rhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python) # base cases if self.op in eval_in_python: res = self.func(left.value, right.value) else: res = pd.eval(self, local_dict=env, engine=engine, parser=parser) name = env.add_tmp(res) return term_type(name, env=env) def convert_values(self): """Convert datetimes to a comparable value in an expression. """ def stringify(value): if self.encoding is not None: encoder = partial(com.pprint_thing_encoded, encoding=self.encoding) else: encoder = com.pprint_thing return encoder(value) lhs, rhs = self.lhs, self.rhs if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar: v = rhs.value if isinstance(v, (int, float)): v = stringify(v) v = pd.Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.rhs.update(v) if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar: v = lhs.value if isinstance(v, (int, float)): v = stringify(v) v = pd.Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.lhs.update(v) def _disallow_scalar_only_bool_ops(self): if ((self.lhs.isscalar or self.rhs.isscalar) and self.op in _bool_ops_dict and (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and issubclass(self.lhs.return_type, (bool, np.bool_))))): raise NotImplementedError("cannot evaluate scalar only bool ops") def isnumeric(dtype): return issubclass(np.dtype(dtype).type, np.number) class Div(BinOp): """Div operator to special case casting. Parameters ---------- lhs, rhs : Term or Op The Terms or Ops in the ``/`` expression. truediv : bool Whether or not to use true division. With Python 3 this happens regardless of the value of ``truediv``. """ def __init__(self, lhs, rhs, truediv, *args, **kwargs): super(Div, self).__init__('/', lhs, rhs, *args, **kwargs) if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): raise TypeError("unsupported operand type(s) for {0}:" " '{1}' and '{2}'".format(self.op, lhs.return_type, rhs.return_type)) if truediv or PY3: _cast_inplace(com.flatten(self), np.float_) _unary_ops_syms = '+', '-', '~', 'not' _unary_ops_funcs = op.pos, op.neg, op.invert, op.invert _unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs)) class UnaryOp(Op): """Hold a unary operator and its operands Parameters ---------- op : str The token used to represent the operator. operand : Term or Op The Term or Op operand to the operator. Raises ------ ValueError * If no function associated with the passed operator token is found. 
""" def __init__(self, op, operand): super(UnaryOp, self).__init__(op, (operand,)) self.operand = operand try: self.func = _unary_ops_dict[op] except KeyError: raise ValueError('Invalid unary operator {0!r}, valid operators ' 'are {1}'.format(op, _unary_ops_syms)) def __call__(self, env): operand = self.operand(env) return self.func(operand) def __unicode__(self): return com.pprint_thing('{0}({1})'.format(self.op, self.operand)) @property def return_type(self): operand = self.operand if operand.return_type == np.dtype('bool'): return np.dtype('bool') if (isinstance(operand, Op) and (operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)): return np.dtype('bool') return np.dtype('int')
# ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- from collections import OrderedDict import logging from neon import __version__ as __neon_version__ from neon import NervanaObject from neon.backends.backend import Block from neon.transforms import CrossEntropyBinary, Logistic from neon.util.persist import load_obj, save_obj, load_class from neon.util.modeldesc import ModelDescription from neon.layers import Sequential, Activation, Tree, SingleOutputTree import numpy as np logger = logging.getLogger(__name__) class Model(NervanaObject): """ Basic model class which stores a list of layers describing the model. Can train the layer weights on a dataset, evaluate on a test set and serialize the mode. Additional functionality can be added to fit through callback functions. Arguments: layers: layer container, or a list of layers (that will be containerized), or a serialized model description dataset (iterator): Data set (ignored, will be removed) weights_only (bool): set to True if you do not want to recreate layers and states during deserialization from a serialized model description. Defaults to False. name (str): Model name. 
Defaults to "model" optimizer (Optimizer): Optimizer object which defines the learning rule for updating model parameters (ie DescentMomentum, AdaDelta) """ def __init__(self, layers, dataset=None, weights_only=False, name="model", optimizer=None): super(Model, self).__init__(name) self.optimizer = optimizer self.params = None # should be able to remove self.states = None # should be able to remove self.epoch_index = 0 self.finished = False self.initialized = False self.cost = None self.nbatches = 0 self.ndata = 0 if dataset is not None: logger.warning('dataset is a deprecated argument and will be ignored') if type(layers) in (ModelDescription, dict): # load up the model from a serialized file (dataset could be None here) self.deserialize(layers, load_states=(not weights_only)) elif type(layers) is str: self.load_params(layers, load_states=(not weights_only)) else: # Wrap the list of layers in a Sequential container if a raw list of layers if type(layers) in (Sequential, Tree, SingleOutputTree): self.layers = layers else: self.layers = Sequential(layers) self.layers.propagate_parallelism("Data") @property def layers_to_optimize(self): return self.layers.layers_to_optimize def set_shortcut(self): # infer whether bprop shortcut can be used on final activation # self.cost should be set to run this otherwise do nothing lastlayer = self.layers[-1] try: if self.cost.costfunc.__class__ is CrossEntropyBinary: if (lastlayer.__class__ is Activation and lastlayer.transform.__class__ is Logistic): lastlayer.transform.set_shortcut(True) except: # if any attributes are not set or any other exception # is thrown leave transform.shortcut as is (do nothing) pass def initialize(self, dataset, cost=None): if self.initialized: return # Propagate shapes through the layers to configure prev_input = dataset prev_input = self.layers.configure(prev_input) if cost is not None: cost.initialize(prev_input) self.cost = cost # Now allocate space self.layers.allocate() self.layers.allocate_deltas() self.initialized = True def __str__(self): """ String representation of model's layers """ config_string = "Network Layers:\n" + self.layers.nested_str() return config_string def fit(self, dataset, cost, optimizer, num_epochs, callbacks): """ Trains the model parameters on a dataset by minimizing the cost function through gradient descent and updates the layer weights according to a learning rule defined in optimizer. Arguments: dataset (iterator): An iterable of minibatches where each element is a (x, y) tuple where x is the input data and y are the labels. x is of dimension (feature_size, batch_size) y is of dimension (label_size, batch_size) Length of the iterator is num_batches which is num_data / batch_size cost (Cost): Defines the function which the model is minimizing based on the output of the last layer and the input labels optimizer (Optimizer): Defines the learning rule for updating the model parameters num_epochs: Number of times to iterate over the dataset. callbacks (Callbacks): Defines callbacks to run at the end of each mini-batch / epoch. 
""" self.nbatches = dataset.nbatches self.ndata = dataset.ndata # self.set_shortcut() # infer if bprop shortcut can be used self.total_cost = self.be.empty((1, 1), dtype=np.float32) self.optimizer = optimizer self.initialize(dataset, cost) callbacks.on_train_begin(num_epochs) while self.epoch_index < num_epochs and not self.finished: self.nbatches = dataset.nbatches callbacks.on_epoch_begin(self.epoch_index) self._epoch_fit(dataset, callbacks) callbacks.on_epoch_end(self.epoch_index) self.epoch_index += 1 callbacks.on_train_end() def _epoch_fit(self, dataset, callbacks): """ Helper function for fit which performs training on a dataset for one epoch. Arguments: dataset (iterable): Dataset iterator to perform fit on """ epoch = self.epoch_index self.total_cost[:] = 0 # iterate through minibatches of the dataset for mb_idx, (x, t) in enumerate(dataset): callbacks.on_minibatch_begin(epoch, mb_idx) self.be.begin(Block.minibatch, mb_idx) x = self.fprop(x) self.total_cost[:] = self.total_cost + self.cost.get_cost(x, t) # deltas back propagate through layers # for every layer in reverse except the 0th one delta = self.cost.get_errors(x, t) self.bprop(delta) self.optimizer.optimize(self.layers_to_optimize, epoch=epoch) self.be.end(Block.minibatch, mb_idx) callbacks.on_minibatch_end(epoch, mb_idx) # now we divide total cost by the number of batches, # so it was never total cost, but sum of averages # across all the minibatches we trained on self.total_cost[:] = self.total_cost / dataset.nbatches def fprop(self, x, inference=False): """ Forward propagates a minibatch x through the model. Arguments: x (Tensor): Input minibatch data inference (bool): Flag for performing training or inference Only affects batch norm and dropout layers. Returns: Tensor: the output of the final layer in the model """ return self.layers.fprop(x, inference) def bprop(self, delta): """ Back propagates the error of a minibatch through the model. Arguments: delta (Tensor): Derivative of cost with respect to the last layer's output """ return self.layers.bprop(delta) def eval(self, dataset, metric): """ Evaluates a model on a dataset according to an input metric. Arguments: datasets (iterable): dataset to evaluate on. metric (Cost): what function to evaluate dataset on. 
""" self.initialize(dataset) running_error = np.zeros((len(metric.metric_names)), dtype=np.float32) nprocessed = 0 dataset.reset() for x, t in dataset: x = self.fprop(x, inference=True) # This logic is for handling partial batch sizes at the end of the dataset nsteps = x.shape[1] / self.be.bsz if not isinstance(x, list) else \ x[0].shape[1] / self.be.bsz bsz = min(dataset.ndata - nprocessed, self.be.bsz) running_error += metric(x, t, calcrange=slice(0, nsteps * bsz)) * nsteps * bsz nprocessed += bsz * nsteps running_error /= nprocessed return running_error def get_outputs(self, dataset): """ Get the activation outputs of the final model layer for the dataset Arguments: dataset (iterable): Dataset iterator to perform fit on Returns: Host numpy array: the output of the final layer for the entire Dataset """ self.initialize(dataset) dataset.reset() # Move "pointer" back to beginning of dataset n = dataset.nbatches x = self.layers.layers[-1].outputs assert not isinstance(x, list), "Can not get_outputs with Branch terminal" Ypred = None for idx, (x, t) in enumerate(dataset): x = self.fprop(x, inference=True) if Ypred is None: (dim0, dim1) = x.shape Ypred = np.empty((n * dim1, dim0), dtype=x.dtype) nsteps = dim1 / self.be.bsz cur_batch = slice(idx * dim1, (idx + 1) * dim1) Ypred[cur_batch] = x.get().T # Handle the recurrent case. if nsteps != 1: b, s = (self.be.bsz, nsteps) Ypred = Ypred.reshape((n, s, b, -1)).transpose(0, 2, 1, 3).copy().reshape(n*b, s, -1) return Ypred[:dataset.ndata] def get_description(self, get_weights=False, keep_states=False): """ Gets a description of the model required to reconstruct the model with no weights like from a yaml file. Returns: dict: Description of each component of the model. """ pdict = dict() pdict['neon_version'] = __neon_version__ compat_mode = self.be.compat_mode if self.be.compat_mode is not None else 'neon' pdict['backend'] = {'type': self.be.__class__.__name__, 'compat_mode': compat_mode, 'rng_seed': self.be.rng_seed, 'rng_state': self.be.rng_get_state()} if self.cost: pdict['cost'] = self.cost.get_description() if self.optimizer: pdict['optimizer'] = self.optimizer.get_description() pdict['model'] = self.layers.get_description(get_weights=get_weights, keep_states=keep_states) return pdict def save_params(self, param_path, keep_states=True): """ Serializes and saves model parameters to the path specified. Arguments: param_path (str): File to write serialized parameter dict to. keep_states (bool): Whether to save optimizer states too. Defaults to True. """ self.serialize(keep_states=keep_states, fn=param_path) def load_params(self, param_path, load_states=True): """ Loads the model parameters (per layer weights, epochs run, optimizer states) saved in param_path from serialize(). Arguments: param_path (str): File containing serialized python dict with layer weights and states. load_states (bool): if False, then only the weights will be loaded into a model in which the layers have already been created, otherwise will (re)create the layers from the serialized parameters and set the learning states as well """ self.deserialize(load_obj(param_path), load_states=load_states) logger.info('Model weights loaded from %s', param_path) def load_weights(self, weight_path): """ .. deprecated:: 1.1.4 Use :func:`load_params` instead """ logger.warning('Calling deprecated load_weights function. 
Use ' 'load_params instead') self.load_params(weight_path) def deserialize(self, model_dict, data=None, load_states=True): """ Loads per layer (weights, states) and other model parameters from the dictionary passed. Arguments: model_dict (dict): dictionary describing the model including layers, cost, optimizers, backend settings, etc. generated by the serialize function data (iterator): Data set (ignored, will be removed) load_states (bool): if False, then only the weights will be loaded into a model in which the layers have already been created, otherwise will (re)create the layers from the serialized parameters and set the learning states as well """ if data is not None: logger.warning('data is a deprecated argument and will be ignored') if 'epoch_index' in model_dict: self.epoch_index = model_dict['epoch_index'] if 'model' not in model_dict: logger.error('Using old model serialization format. ' 'Serialized the model into new format') param_layers = [l for l in self.layers_to_optimize] param_dict_list = model_dict['layer_params_states'] for l, ps in zip(param_layers, param_dict_list): l.set_params(ps) if 'states' in ps and load_states: l.set_states(ps) return if 'backend' in model_dict: if 'compat_mode' in model_dict['backend']: self.be.compat_mode = model_dict['backend']['compat_mode'] else: model_dict['backend'] = {} typ = model_dict['model']['type'] main_container = load_class(typ) if not hasattr(self, 'layers'): self.layers = main_container.gen_class(model_dict['model']['config']) self.layers.load_weights(model_dict['model'], load_states) if load_states and 'rng_state' in model_dict['backend']: try: self.be.rng_set_state(model_dict['backend']['rng_state']) except ValueError as e: # could come about when switching backend types (ex GPU to CPU) logger.warning("Problems restoring existing RNG state: %s", str(e)) # serialize tells how to write out the parameters we've learned so # far and associate them with layers. it can ignore layers with no # learned parameters. the model stores states to pass to the # optimizers. if we're saving the model out for inference, we # don't need to remember states. def serialize(self, fn=None, keep_states=True): """ Creates a dictionary storing the layer parameters and epochs complete. Arguments: fn (str): file to save pkl formatted model dictionary keep_states (bool): Whether to save optimizer states. Returns: dict: Model data including layer parameters and epochs complete. """ # get the model dict with the weights pdict = self.get_description(get_weights=True, keep_states=keep_states) pdict['epoch_index'] = self.epoch_index + 1 if self.initialized: pdict['train_input_shape'] = self.layers.in_shape if fn is not None: save_obj(pdict, fn) return return pdict def set_batch_size(self, N): """ Set the actual minibatch size, so eventhough the buffers are allocated considering excessive padding, the processing for some layers may be shortened. Currently most of the neon layers don't use that to control the processing. The interface is here only for when someone wants to set that information and experiment. """ return self.layers.set_batch_size(N) def set_seq_len(self, S): """ Set the actual minibatch sequence length, so eventhough the buffers are allocated considering excessive padding, the processing for some layers may be shortened. Currently most of the neon layers don't use that to control the processing. The interface is here only for when someone wants to set that information and experiment. 
""" return self.layers.set_seq_len(S) def benchmark(self, dataset, inference=False, cost=None, optimizer=None, niterations=20, nskip=2): """ Measure runtime for computing fprop and bprop seperately, as well as full minibatch run times. For inference case, only the fprop Arguments: dataset (iterable): Dataset iterator to perform fit on cost (Cost): Defines the function which the model is minimizing based on the output of the last layer and the input labels niterations (optional, int): Number of minibatches to average over nskip (optional, int): number of iterations at the beginning to skip when calculating the runtime statistics Returns: dictionary with fprop, bprop run times """ # initialize model if inference is False: assert cost is not None and optimizer is not None, "Need cost and optimizer to \ benchmark bprop and update" self.cost = cost self.initialize(dataset, cost) self.optimizer = optimizer self.total_cost = self.be.empty((1, 1)) self.total_cost[:] = 0 # iterate through minibatches of the dataset times = OrderedDict() time_keys = ['fprop'] if inference else ['fprop', 'bprop', 'iteration'] for ky in time_keys: times[ky] = np.full(niterations + nskip, -1.0) count = 0 fprop_start = self.be.init_mark() fprop_end = self.be.init_mark() bprop_end = self.be.init_mark() while count < niterations + nskip: dataset.reset() for mb_idx, (x, t) in enumerate(dataset): self.be.record_mark(fprop_start) # mark start of fprop x = self.fprop(x) if inference is False: self.total_cost[:] = self.total_cost + self.cost.get_cost(x, t) self.be.record_mark(fprop_end) # mark end of fprop and start of bprop if inference is False: delta = self.cost.get_errors(x, t) self.bprop(delta) self.optimizer.optimize(self.layers_to_optimize, epoch=0) self.be.record_mark(bprop_end) # mark end of bprop self.be.synchronize_mark(bprop_end) else: self.be.synchronize_mark(fprop_end) times['fprop'][count] = self.be.get_time(fprop_start, fprop_end) if inference is False: times['bprop'][count] = self.be.get_time(fprop_end, bprop_end) times['iteration'][count] = times['fprop'][count] + times['bprop'][count] count += 1 if count >= niterations + nskip: break # print results header = ('Func', 'Mean', 'Median', 'Min', 'Max', 'Units') stats = tuple(stat.lower() for stat in header[1:-1]) fmt_titles = '| {:^11} '*len(header) + '|' fmt_nums = '| {func:<11} ' + '| {%s:<10.5g} '*len(stats) % (stats) + '| {units:^11} |' head_str = fmt_titles.format(*header) sep = '-'*len(head_str) head_str = sep + '\n' + head_str + '\n' + sep print(head_str) out_stats = {} for step in times: timesu = np.array(times[step][nskip:]) # in ms out_stats[step] = {} for stat in stats: out_stats[step][stat] = getattr(np, stat)(timesu) print(fmt_nums.format(units='msec', func=step, **out_stats[step])) print(sep) return out_stats
from DBUtil import * ##### junction - 202666877 def createjunction4Data(junction_id): connection = MongoClient() db = connection.c3stem_database db.junction.insert({ "_id": junction_id, "west_lane_out":"-19501516#2", "west_lane_out_values":["-19501516#2_0", "-19501516#2_1"], "west_lane_in":"19501516#2", "west_lane_in_values":["19501516#2_1","19501516#2_0"], # left, straight, right "west_lane_in_adjascent":["-19503247#2", "19501516#3", "19503247#3"], "east_lane_in":"-19501516#3", "east_lane_in_values":["-19501516#3_0","-19501516#3_1", "-19501516#4_0","-19501516#4_1"], "east_lane_in_adjascent":["19503247#3", "-19501516#2", "-19503247#2"], "east_lane_out":"19501516#3", "east_lane_out_values":["19501516#3_1", "19501516#3_0"], "north_lane_in":"19503247#2", "north_lane_in_values":["19503247#2_0","19503247#2_1"], "north_lane_in_adjascent":["19501516#3", "19503247#3", "-19501516#2"], "north_lane_out":"-19503247#2", "north_lane_out_values":["-19503247#2_1","-19503247#2_0"], "south_lane_out":"19503247#3", "south_lane_out_values":["19503247#3_0","19503247#3_1"], "south_lane_in":"-19503247#3", "south_lane_in_values":["-19503247#3_1", "-19503247#3_0"], "south_lane_in_adjascent":["-19501516#2", "-19503247#2", "19501516#3"] }) db.inductionloop.insert({ "_id": "-19501516#2_0_5", "junction": junction_id, "location": "west_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "-19501516#2_1_5", "junction": junction_id, "location": "west_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "19501516#2_1_-5", "junction": junction_id, "location": "west_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "19501516#2_0_-5", "junction": junction_id, "location": "west_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "-19501516#3_0_-5", "junction": junction_id, "location": "east_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "-19501516#3_1_-5", "junction": junction_id, "location": "east_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "19501516#3_1_5", "junction": junction_id, "location": "east_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "19501516#3_0_5", "junction": junction_id, "location": "east_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "19503247#2_0_-5", "junction": junction_id, "location": "north_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "19503247#2_1_-5", "junction": junction_id, "location": "north_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "-19503247#2_1_5", "junction": junction_id, "location": "north_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "-19503247#2_0_5", "junction": junction_id, "location": "north_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "19503247#3_0_5", "junction": junction_id, "location": "south_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "19503247#3_1_5", "junction": junction_id, "location": "south_lane_out", "pos": 5 }) db.inductionloop.insert({ "_id": "-19503247#3_1_-5", "junction": junction_id, "location": "south_lane_in", "pos": -5 }) db.inductionloop.insert({ "_id": "-19503247#3_0_-5", "junction": junction_id, "location": "south_lane_in", "pos": -5 }) def createJunction4TurnProbability(junction_id, simulation_id): connection = MongoClient() db = connection.c3stem_database # First intersection db.turnprobability.insert({ "simulation_id": simulation_id, "intersection_id": junction_id, "edge_id": "19501516#2", "left_turn": "0.2", "go_straight": "0.6", "right_turn": "0.2", "to_edge_left": "-19503247#2", "to_edge_straight": "19501516#3", "to_edge_right": "19503247#3" }) 
db.turnprobability.insert({ "simulation_id": simulation_id, "intersection_id": junction_id, "edge_id": "-19501516#3", "left_turn": "0.2", "go_straight": "0.6", "right_turn": "0.2", "to_edge_left": "19503247#3", "to_edge_straight": "-19501516#2", "to_edge_right": "-19503247#2" }) db.turnprobability.insert({ "simulation_id": simulation_id, "intersection_id": junction_id, "edge_id": "19503247#2", "left_turn": "0.2", "go_straight": "0.6", "right_turn": "0.2", "to_edge_left": "19501516#3", "to_edge_straight": "19503247#3", "to_edge_right": "-19501516#2" }) db.turnprobability.insert({ "simulation_id": simulation_id, "intersection_id": junction_id, "edge_id": "-19503247#3", "left_turn": "0.2", "go_straight": "0.6", "right_turn": "0.2", "to_edge_left": "-19501516#2", "to_edge_straight": "-19503247#2", "to_edge_right": "19501516#3" }) def createJunction4FlowData(junction_id, simulation_id): # | B | C # | | # | Iwest | # --A-- O1----------------- O2---D--- # | Ieast | # | | # Lsouth| Lnorth Jsouth | Jnorth # | | # | Keast | # --H-- O4----------------- O3---E--- # | Kwest | # | | # | G | F connection = MongoClient() db = connection.c3stem_database db.flows.insert({ "point_name": "Lsouth", "simulation_id": simulation_id, "intersection_id": junction_id, "from_edge_id": "19503247#2", "to_edge_id": "n/a", "via_edge_id": "19503247#2", "flow_rate": "600", "latitude": "35.037911563597234", "longitude": "-85.28341591358185", "removable": "0" }) db.flows.insert({ "point_name": "Lnorth", "simulation_id": simulation_id, "intersection_id": junction_id, "from_edge_id": "19457616#3", "to_edge_id": "n/a", "via_edge_id": "19457616#3", "flow_rate": "600", "latitude": "36.139572", "longitude": "-86.810", "removable": "1" }) db.flows.insert({ "point_name": "Kwest", "simulation_id": simulation_id, "intersection_id": junction_id, "from_edge_id": "-19501516#4", "to_edge_id": "n/a", "via_edge_id": "-19501516#4", "flow_rate": "600", "latitude": "35.034424052917046", "longitude": "-85.27881860733032", "removable": "0" }) db.flows.insert({ "point_name": "Keast", "simulation_id": simulation_id, "intersection_id": junction_id, "from_edge_id": "-19456179#0", "to_edge_id": "n/a", "via_edge_id": "-19456179#0", "flow_rate": "600", "latitude": "36.137900", "longitude": "-86.803260", "removable": "1" }) db.flows.insert({ "point_name": "G", "simulation_id": simulation_id, "intersection_id": junction_id, "from_edge_id": "-19503247#3", "to_edge_id": "n/a", "via_edge_id": "-19503247#3", "flow_rate": "600", "latitude": "35.03558364477337", "longitude": "-85.28470873832703", "removable": "0" }) db.flows.insert({ "point_name": "H", "simulation_id": simulation_id, "intersection_id": junction_id, "from_edge_id": "19501516#2", "to_edge_id": "n/a", "via_edge_id": "19501516#2", "flow_rate": "600", "latitude": "35.037077034286156", "longitude": "-85.28547048568726", "removable": "0" })
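
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the induction-loop
# inserts above all follow the same shape -- one document per lane, with the
# _id built from "<lane>_<pos>" and pos +5 for outgoing / -5 for incoming
# edges. A helper like the one below could generate those documents from the
# junction definition instead of spelling each insert out by hand; the lane
# lists are copied from the junction document above, and inserting the result
# with pymongo works the same way as the db.inductionloop.insert calls.
# ---------------------------------------------------------------------------
def build_inductionloop_docs(junction_id, junction_doc):
    """Return induction-loop documents for every *_values lane of a junction."""
    docs = []
    for key, lanes in junction_doc.items():
        if not key.endswith("_values"):
            continue
        location = key[:-len("_values")]           # e.g. "west_lane_out"
        pos = 5 if location.endswith("_out") else -5
        for lane in lanes:
            docs.append({
                "_id": "%s_%d" % (lane, pos),
                "junction": junction_id,
                "location": location,
                "pos": pos,
            })
    return docs


if __name__ == "__main__":
    example_junction = {
        "west_lane_out_values": ["-19501516#2_0", "-19501516#2_1"],
        "west_lane_in_values": ["19501516#2_1", "19501516#2_0"],
    }
    for doc in build_inductionloop_docs("202666877", example_junction):
        print(doc)
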
#!/bin/env python from __future__ import print_function from qds_sdk.qubole import Qubole from qds_sdk.commands import * from qds_sdk.cluster import * import qds_sdk.exception from qds_sdk.scheduler import SchedulerCmdLine from qds_sdk.actions import ActionCmdLine from qds_sdk.report import ReportCmdLine from qds_sdk.dbtaps import DbTapCmdLine from qds_sdk.role import RoleCmdLine from qds_sdk.group import GroupCmdLine from qds_sdk.spaces import SpaceCmdLine from qds_sdk.space_subscribers import SpaceSubscriberCmdLine from qds_sdk.published_hivetables import PublishedHivetableCmdLine from qds_sdk.subscribed_hivetables import SubscribedHivetableCmdLine from qds_sdk.cloud_creds import CloudCredCmdLine from qds_sdk.cross_account_configs import CrossAccountConfigCmdLine from qds_sdk.accounts import AccountCmdLine import os import sys import traceback import logging import json from optparse import OptionParser log = logging.getLogger("qds") CommandClasses = { "hivecmd": HiveCommand, "sparkcmd": SparkCommand, "dbtapquerycmd": DbTapQueryCommand, "pigcmd": PigCommand, "hadoopcmd": HadoopCommand, "shellcmd": ShellCommand, "dbexportcmd": DbExportCommand, "dbimportcmd": DbImportCommand, "prestocmd": PrestoCommand } usage_str = ("Usage: \n" "qds [options] <CmdArgs|ClusterArgs|ReportArgs>\n" "\nCmdArgs:\n" + " <hivecmd|hadoopcmd|prestocmd|pigcmd|shellcmd|dbexportcmd|dbtapquerycmd|sparkcmd> <submit|run|check|cancel|getresult|getlog> [args .. ]\n" " submit [cmd-specific-args .. ] : submit cmd & print id \n" " run [cmd-specific-args .. ] : submit cmd & wait. print results \n" " check <id> : print the cmd object for this Id\n" " cancel <id> : cancels the cmd with this Id\n" " getresult <id> : get the results for the cmd with this Id\n" " getlog <id> : get the logs for the cmd with this Id\n" "\nClusterArgs:\n" + " cluster <create|delete|update|list|start|terminate|status|reassign_label> [args .. ]\n" " create [cmd-specific-args ..] : create a new cluster\n" " delete [cmd-specific-args ..] : delete an existing cluster\n" " update [cmd-specific-args ..] : update the settings of an existing cluster\n" " clone [cmd-specific-args ..] : clone a cluster from an existing one\n" " list [cmd-specific-args ..] : list existing cluster(s)\n" " start [cmd-specific-args ..] : start an existing cluster\n" " terminate [cmd-specific-args ..] : terminate a running cluster\n" " status [cmd-specific-args ..] : show whether the cluster is up or down\n" + " reassign_label [cmd-specific-args ..] : reassign label from one cluster to another\n" + "\nDbTap:\n" + " dbtap --help\n" + "\nReportArgs:\n" + " report (<report-name> [options] | list)\n" + "\nGroup:\n" + " group --help\n" + "\nRole:\n" + " role --help\n" + "\nScheduler:\n" + " scheduler --help\n") def usage(parser=None): if parser is None: sys.stderr.write(usage_str) else: parser.print_help() sys.exit(1) def checkargs_id(args): if len(args) != 1: sys.stderr.write("expecting single argument command id\n") usage() def submitaction(cmdclass, args): args = cmdclass.parse(args) if args is not None: args.pop("print_logs") # This is only useful while using the 'run' action. 
cmd = cmdclass.create(**args) print("Submitted %s, Id: %s" % (cmdclass.__name__, cmd.id)) return 0 def _getresult(cmdclass, cmd): if Command.is_success(cmd.status): log.info("Fetching results for %s, Id: %s" % (cmdclass.__name__, cmd.id)) cmd.get_results(sys.stdout, delim='\t') return 0 else: log.error("Cannot fetch results - command Id: %s failed with status: %s" % (cmd.id, cmd.status)) return 1 def runaction(cmdclass, args): args = cmdclass.parse(args) if args is not None: print_logs = args.pop("print_logs") # We don't want to send this to the API. cmd = cmdclass.run(**args) if print_logs: sys.stderr.write(cmd.get_log()) return _getresult(cmdclass, cmd) def checkaction(cmdclass, args): checkargs_id(args) o = cmdclass.find(args.pop(0)) print(str(o)) return 0 def cancelaction(cmdclass, args): checkargs_id(args) r = cmdclass.cancel_id(args.pop(0)) skey = 'kill_succeeded' if r.get(skey) is None: sys.stderr.write("Invalid Json Response %s - missing field '%s'" % (str(r), skey)) return 11 elif r['kill_succeeded']: print("Command killed successfully") return 0 else: sys.stderr.write("Cancel failed with reason '%s'\n" % r.get('result')) return 12 def getresultaction(cmdclass, args): checkargs_id(args) cmd = cmdclass.find(args.pop(0)) return _getresult(cmdclass, cmd) def getlogaction(cmdclass, args): checkargs_id(args) print(cmdclass.get_log_id(args.pop(0))) return 0 def getjobsaction(cmdclass, args): checkargs_id(args) cmd = cmdclass.find(args.pop(0)) if Command.is_done(cmd.status): log.info("Fetching jobs for %s, Id: %s" % (cmdclass.__name__, cmd.id)) print(cmdclass.get_jobs_id(cmd.id)) return 0 else: log.error("Cannot fetch jobs - command Id: %s is not done. Status: %s" % (cmd.id, cmd.status)) return 1 def cmdmain(cmd, args): cmdclass = CommandClasses[cmd] actionset = set(["submit", "run", "check", "cancel", "getresult", "getlog", "getjobs"]) if len(args) < 1: sys.stderr.write("missing argument containing action\n") usage() action = args.pop(0) if action not in actionset: sys.stderr.write("action must be one of <%s>\n" % "|".join(actionset)) usage() return globals()[action + "action"](cmdclass, args) def checkargs_cluster_id_label(args): if len(args) != 1: sys.stderr.write("expecting single argument cluster id or cluster label\n") usage() def cluster_create_action(clusterclass, args): arguments = clusterclass._parse_create_update(args, action="create") cluster_info = _create_cluster_info(arguments) result = clusterclass.create(cluster_info.minimal_payload()) print(json.dumps(result, indent=4)) return 0 def cluster_update_action(clusterclass, args): arguments = clusterclass._parse_create_update(args, action="update") cluster_info = _create_cluster_info(arguments) result = clusterclass.update(arguments.cluster_id_label, cluster_info.minimal_payload()) print(json.dumps(result, indent=4)) return 0 def cluster_clone_action(clusterclass, args): arguments = clusterclass._parse_create_update(args, action="clone") cluster_info = _create_cluster_info(arguments) result = clusterclass.clone(arguments.cluster_id_label, cluster_info.minimal_payload()) print(json.dumps(result, indent=4)) return 0 def _create_cluster_info(arguments): cluster_info = ClusterInfo(arguments.label, arguments.aws_access_key_id, arguments.aws_secret_access_key, arguments.disallow_cluster_termination, arguments.enable_ganglia_monitoring, arguments.node_bootstrap_file,) cluster_info.set_ec2_settings(arguments.aws_region, arguments.aws_availability_zone, arguments.vpc_id, arguments.subnet_id) custom_config = None if 
arguments.custom_config_file is not None: try: custom_config = open(arguments.custom_config_file).read() except IOError as e: sys.stderr.write("Unable to read custom config file: %s\n" % str(e)) usage() cluster_info.set_hadoop_settings(arguments.master_instance_type, arguments.slave_instance_type, arguments.initial_nodes, arguments.max_nodes, custom_config, arguments.slave_request_type, arguments.use_hbase, arguments.custom_ec2_tags, arguments.use_hadoop2) cluster_info.set_spot_instance_settings( arguments.maximum_bid_price_percentage, arguments.timeout_for_request, arguments.maximum_spot_instance_percentage) cluster_info.set_stable_spot_instance_settings( arguments.stable_maximum_bid_price_percentage, arguments.stable_timeout_for_request, arguments.stable_allow_fallback) fairscheduler_config_xml = None if arguments.fairscheduler_config_xml_file is not None: try: fairscheduler_config_xml = open(arguments.fairscheduler_config_xml_file).read() except IOError as e: sys.stderr.write("Unable to read config xml file: %s\n" % str(e)) usage() cluster_info.set_fairscheduler_settings(fairscheduler_config_xml, arguments.default_pool) customer_ssh_key = None if arguments.customer_ssh_key_file is not None: try: customer_ssh_key = open(arguments.customer_ssh_key_file).read() except IOError as e: sys.stderr.write("Unable to read customer ssh key file: %s\n" % str(e)) usage() cluster_info.set_security_settings(arguments.encrypted_ephemerals, customer_ssh_key) presto_custom_config = None if arguments.presto_custom_config_file is not None: try: presto_custom_config = open(arguments.presto_custom_config_file).read() except IOError as e: sys.stderr.write("Unable to read presto custom config file: %s\n" % str(e)) usage() cluster_info.set_presto_settings(arguments.enable_presto, presto_custom_config) return cluster_info def cluster_delete_action(clusterclass, args): checkargs_cluster_id_label(args) result = clusterclass.delete(args.pop(0)) print(json.dumps(result, indent=4)) return 0 def cluster_list_action(clusterclass, args): arguments = clusterclass._parse_list(args) if arguments['cluster_id'] is not None: result = clusterclass.show(arguments['cluster_id']) elif arguments['label'] is not None: result = clusterclass.show(arguments['label']) elif arguments['state'] is not None: result = clusterclass.list(state=arguments['state']) else: result = clusterclass.list() print(json.dumps(result, indent=4)) return 0 def cluster_start_action(clusterclass, args): checkargs_cluster_id_label(args) result = clusterclass.start(args.pop(0)) print(json.dumps(result, indent=4)) return 0 def cluster_terminate_action(clusterclass, args): checkargs_cluster_id_label(args) result = clusterclass.terminate(args.pop(0)) print(json.dumps(result, indent=4)) return 0 def cluster_status_action(clusterclass, args): checkargs_cluster_id_label(args) result = clusterclass.status(args.pop(0)) print(json.dumps(result, indent=4)) return 0 def cluster_reassign_label_action(clusterclass, args): arguments = clusterclass._parse_reassign_label(args) result = clusterclass.reassign_label(arguments.destination_cluster, arguments.label) print(json.dumps(result, indent=4)) return 0 def clustermain(args): clusterclass = Cluster actionset = set(["create", "delete", "update", "clone", "list", "start", "terminate", "status", "reassign_label"]) if len(args) < 1: sys.stderr.write("missing argument containing action\n") usage() action = args.pop(0) if action not in actionset: sys.stderr.write("action must be one of <%s>\n" % "|".join(actionset)) usage() return 
globals()["cluster_" + action + "_action"](clusterclass, args) def reportmain(args): result = ReportCmdLine.run(args) print(result) def actionmain(args): result = ActionCmdLine.run(args) print(result) def schedulermain(args): result = SchedulerCmdLine.run(args) print(result) def dbtapmain(args): result = DbTapCmdLine.run(args) print(result) def spacemain(args): result = SpaceCmdLine.run(args) print(result) def spacesubscribermain(args): result = SpaceSubscriberCmdLine.run(args) print(result) def publishedhivetablemain(args): result = PublishedHivetableCmdLine.run(args) print(result) def subscribedhivetablemain(args): result = SubscribedHivetableCmdLine.run(args) print(result) def cloudcredmain(args): result = CloudCredCmdLine.run(args) print(result) def crossaccountconfigmain(args): result = CrossAccountConfigCmdLine.run(args) print(result) def rolemain(args): result = RoleCmdLine.run(args) print(result) def groupmain(args): result = GroupCmdLine.run(args) print(result) def accountmain(args): result = AccountCmdLine.run(args) print(result) def main(): optparser = OptionParser(usage=usage_str) optparser.add_option("--token", dest="api_token", default=os.getenv('QDS_API_TOKEN'), help="api token for accessing Qubole. must be specified via command line or passed in via environment variable QDS_API_TOKEN") optparser.add_option("--url", dest="api_url", default=os.getenv('QDS_API_URL'), help="base url for QDS REST API. defaults to https://api.qubole.com/api ") optparser.add_option("--version", dest="api_version", default=os.getenv('QDS_API_VERSION'), help="version of REST API to access. defaults to v1.2") optparser.add_option("--poll_interval", dest="poll_interval", default=os.getenv('QDS_POLL_INTERVAL'), help="interval for polling API for completion and other events. defaults to 5s") optparser.add_option("--skip_ssl_cert_check", dest="skip_ssl_cert_check", action="store_true", default=False, help="skip verification of server SSL certificate. 
Insecure: use with caution.") optparser.add_option("-v", dest="verbose", action="store_true", default=False, help="verbose mode - info level logging") optparser.add_option("--vv", dest="chatty", action="store_true", default=False, help="very verbose mode - debug level logging") optparser.disable_interspersed_args() (options, args) = optparser.parse_args() if options.chatty: logging.basicConfig(level=logging.DEBUG) elif options.verbose: logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.WARN) if options.api_token is None: sys.stderr.write("No API Token provided\n") usage(optparser) if options.api_url is None: options.api_url = "https://api.qubole.com/api/" if options.api_version is None: options.api_version = "v1.2" if options.poll_interval is None: options.poll_interval = 5 if options.skip_ssl_cert_check is None: options.skip_ssl_cert_check = False elif options.skip_ssl_cert_check: log.warn("Insecure mode enabled: skipping SSL cert verification\n") Qubole.configure(api_token=options.api_token, api_url=options.api_url, version=options.api_version, poll_interval=options.poll_interval, skip_ssl_cert_check=options.skip_ssl_cert_check) if len(args) < 1: sys.stderr.write("Missing first argument containing command type\n") usage(optparser) a0 = args.pop(0) if a0 in CommandClasses: return cmdmain(a0, args) if a0 == "cluster": return clustermain(args) if a0 == "action": return actionmain(args) if a0 == "scheduler": return schedulermain(args) if a0 == "report": return reportmain(args) if a0 == "dbtap": return dbtapmain(args) if a0 == "space": return spacemain(args) if a0 == "space_subscriber": return spacesubscribermain(args) if a0 == "published_hivetable": return publishedhivetablemain(args) if a0 == "subscribed_hivetable": return subscribedhivetablemain(args) if a0 == "cloud_cred": return cloudcredmain(args) if a0 == "cross_acc_config": return crossaccountconfigmain(args) if a0 == "group": return groupmain(args) if a0 == "role": return rolemain(args) if a0 == "account": return accountmain(args) cmdset = set(CommandClasses.keys()) sys.stderr.write("First command must be one of <%s>\n" % "|".join(cmdset.union(["cluster", "scheduler", "report", "dbtap", "role", "group"]))) usage(optparser) if __name__ == '__main__': try: sys.exit(main()) except qds_sdk.exception.Error as e: sys.stderr.write("Error: Status code %s (%s) from url %s\n" % (e.request.status_code, e.__class__.__name__, e.request.url)) sys.exit(1) except qds_sdk.exception.ParseError as e: sys.stderr.write("Error: %s\n" % str(e)) sys.stderr.write("Usage: %s\n" % e.usage) sys.exit(2) except Exception: traceback.print_exc(file=sys.stderr) sys.exit(3)
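
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the CLI above is a
# thin wrapper around the qds_sdk objects it imports, so the same flow can be
# driven from Python directly -- Qubole.configure() as in main(), then
# HiveCommand.run() and get_results() as in runaction()/_getresult(). The
# token is read from the environment and the query string is just an example;
# treat the keyword name `query` as an assumption about HiveCommand's argument
# parser rather than a documented guarantee.
# ---------------------------------------------------------------------------
import os
import sys

from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command, HiveCommand


def run_example_hive_query():
    Qubole.configure(api_token=os.environ["QDS_API_TOKEN"],
                     api_url="https://api.qubole.com/api/",
                     version="v1.2",
                     poll_interval=5)
    cmd = HiveCommand.run(query="show tables;")   # blocks until the command finishes
    if Command.is_success(cmd.status):
        cmd.get_results(sys.stdout, delim='\t')
    else:
        sys.stderr.write("Command %s failed with status %s\n" % (cmd.id, cmd.status))


if __name__ == "__main__":
    run_example_hive_query()
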
############################################################################### ## ## Copyright (C) 2014-2015, New York University. ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: [email protected] ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################### from __future__ import division from xml.auto_gen import XMLDAOListBase from sql.auto_gen import SQLDAOListBase from vistrails.core.system import get_elementtree_library from vistrails.db import VistrailsDBException from vistrails.db.versions.v0_9_4 import version as my_version from vistrails.db.versions.v0_9_4.domain import DBVistrail, DBWorkflow, DBLog, \ DBAbstraction, DBGroup ElementTree = get_elementtree_library() class DAOList(dict): def __init__(self): self['xml'] = XMLDAOListBase() self['sql'] = SQLDAOListBase() def parse_xml_file(self, filename): return ElementTree.parse(filename) def write_xml_file(self, filename, tree): tree.write(filename) def read_xml_object(self, vtType, node): return self['xml'][vtType].fromXML(node) def write_xml_object(self, obj, node=None): res_node = self['xml'][obj.vtType].toXML(obj, node) return res_node def open_from_xml(self, filename, vtType, tree=None): """open_from_xml(filename) -> DBVistrail""" if tree is None: tree = self.parse_xml_file(filename) vistrail = self.read_xml_object(vtType, tree.getroot()) return vistrail def save_to_xml(self, obj, filename, tags, version=None): """save_to_xml(obj : object, filename: str, tags: dict, version: str) -> None """ root = self.write_xml_object(obj) if version is None: version = my_version root.set('version', version) for k, v in tags.iteritems(): root.set(k, v) tree = ElementTree.ElementTree(root) self.write_xml_file(filename, tree) def open_from_db(self, db_connection, vtType, id, lock=False): all_objects = {} global_props = {'id': id} # print global_props res_objects = self['sql'][vtType].get_sql_columns(db_connection, global_props, lock) if len(res_objects) > 1: raise VistrailsDBException("More than object of type '%s' and " "id '%s' exist in the database" % \ (vtType, id)) elif len(res_objects) <= 0: raise VistrailsDBException("No objects of type '%s' and " "id '%s' exist in the database" % \ (vtType, id)) all_objects.update(res_objects) res = res_objects.values()[0] del global_props['id'] for dao in self['sql'].itervalues(): if (dao == self['sql'][DBVistrail.vtType] or # dao == self['sql'][DBWorkflow.vtType] or dao == self['sql'][DBLog.vtType] or dao == self['sql'][DBAbstraction.vtType]): continue current_objs = dao.get_sql_columns(db_connection, global_props, lock) if dao == self['sql'][DBWorkflow.vtType]: for key, obj in current_objs.iteritems(): if key[0] == vtType and key[1] == id: continue elif key[0] == DBWorkflow.vtType: res_obj = self.open_from_db(db_connection, key[0], key[1], lock) res_dict = {} res_dict[(res_obj.db_id, res_obj.vtType)] = res_obj all_objects.update(res_dict) else: all_objects.update(current_objs) for key, obj in all_objects.iteritems(): if key[0] == vtType and key[1] == id: continue self['sql'][obj.vtType].from_sql_fast(obj, all_objects) for obj in all_objects.itervalues(): obj.is_dirty = False obj.is_new = False return res def save_to_db(self, db_connection, obj, do_copy=False, global_props=None): if do_copy and obj.db_id is not None: obj.db_id = None children = obj.db_children() # forSQL=True) children.reverse() if global_props is None: global_props = {'entity_type': obj.vtType} # print 'global_props:', global_props # assumes not deleting entire thing (child, _, _) = children[0] self['sql'][child.vtType].set_sql_columns(db_connection, child, global_props, do_copy) self['sql'][child.vtType].to_sql_fast(child, do_copy) global_props = {'entity_id': child.db_id, 'entity_type': child.vtType} if 
not do_copy: for (child, _, _) in children: for c in child.db_deleted_children(True): self['sql'][c.vtType].delete_sql_column(db_connection, c, global_props) (child, _, _) = children.pop(0) child.is_dirty = False child.is_new = False for (child, _, _) in children: # print "child:", child.vtType, child.db_id self['sql'][child.vtType].set_sql_columns(db_connection, child, global_props, do_copy) self['sql'][child.vtType].to_sql_fast(child, do_copy) if child.vtType == DBGroup.vtType: if child.db_workflow: # print '*** entity_type:', global_props['entity_type'] self.save_to_db(db_connection, child.db_workflow, do_copy, {'entity_id': global_props['entity_id'], 'entity_type': \ global_props['entity_type']} ) child.is_dirty = False child.is_new = False def serialize(self, object): root = self.write_xml_object(object) return ElementTree.tostring(root) def unserialize(self, str, obj_type): def set_dirty(obj): for child, _, _ in obj.db_children(): if child.vtType == DBGroup.vtType: if child.db_workflow: set_dirty(child.db_workflow) child.is_dirty = True child.is_new = True try: root = ElementTree.fromstring(str) obj = self.read_xml_object(obj_type, root) set_dirty(obj) return obj except SyntaxError, e: msg = "Invalid VisTrails serialized object %s" % str raise VistrailsDBException(msg) return None
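
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): serialize() and
# unserialize() above are symmetric, so a round trip through an XML string
# should hand back an equivalent object marked dirty/new. The sketch assumes
# it lives in the same module as the DAOList defined above (so DAOList and
# DBVistrail are in scope) and that DBVistrail() can be constructed with no
# arguments, which is an assumption about the generated domain class rather
# than something stated above.
# ---------------------------------------------------------------------------
def _roundtrip_example():
    dao_list = DAOList()
    vistrail = DBVistrail()                      # assumed no-arg constructor
    xml_str = dao_list.serialize(vistrail)       # object -> XML string
    restored = dao_list.unserialize(xml_str, DBVistrail.vtType)
    print("round-tripped a %s, is_new=%s" % (restored.vtType, restored.is_new))
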
from django.contrib import admin from django.forms import ModelForm,HiddenInput # Register your models here. from uwsgi_it_api.models import * class ServerAdmin(admin.ModelAdmin): def memory_status(self): return "available:%d used:%d free:%d" % (self.memory, self.used_memory, self.free_memory) def storage_status(self): return "available:%d used:%d free:%d" % (self.storage, self.used_storage, self.free_storage) list_display = ('__unicode__', memory_status, storage_status, 'weight', 'owner', 'ctime', 'mtime') list_filter = ('datacenter',) class ContainerAccounted(admin.SimpleListFilter): title = 'is accounted ?' parameter_name = 'is_accounted' def lookups(self, request, model_admin): return (('1', 'Yes'), ('0', 'No')) def queryset(self, request, queryset): if self.value() == '0': return queryset.filter(accounted=False, server__owner=None) elif self.value() == '1': return queryset.filter(accounted=True, server__owner=None) return queryset class ContainerAdminForm(ModelForm): def __init__(self, *args, **kwargs): super(ContainerAdminForm, self).__init__(*args, **kwargs) if self.instance and self.instance.pk: self.fields['tags'].queryset = Tag.objects.filter(customer=self.instance.customer) self.fields['custom_distro'].queryset = CustomDistro.objects.filter(container__server=self.instance.server, container__customer=self.instance.customer).exclude(container=self.instance) else: self.fields['tags'].widget = HiddenInput() self.fields['custom_distro'].widget = HiddenInput() def regenerate_secret_uuid(modeladmin, request, queryset): for item in queryset.all(): item.regenerate_secret_uuid() regenerate_secret_uuid.short_description = 'Regenerate secret uuid' class ContainerAdmin(admin.ModelAdmin): def is_accounted(self): if self.accounted: return True if self.server and self.server.owner: return True return False is_accounted.boolean = True list_display = ('__unicode__', 'ip', 'hostname', 'customer', 'server', 'distro', 'memory', 'storage', is_accounted, 'ctime') list_filter = ('server', 'distro', ContainerAccounted) search_fields = ('name', 'customer__user__username', 'tags__name', 'admin_note', 'admin_order') actions = [regenerate_secret_uuid] form = ContainerAdminForm class DomainAdminForm(ModelForm): def __init__(self, *args, **kwargs): super(DomainAdminForm, self).__init__(*args, **kwargs) if self.instance and self.instance.pk: self.fields['tags'].queryset = Tag.objects.filter(customer=self.instance.customer) else: self.fields['tags'].widget = HiddenInput() class DomainAdmin(admin.ModelAdmin): list_display = ('__unicode__', 'customer') list_filter = ('customer',) search_fields = ('name',) form = DomainAdminForm class ContainerMetricAdmin(admin.ModelAdmin): list_display = ('container', 'year', 'month', 'day') list_filter = ('year', 'month') class DomainMetricAdmin(admin.ModelAdmin): list_display = ('domain', 'container', 'year', 'month', 'day') list_filter = ('year', 'month') class LegionNodeInline(admin.TabularInline): model = LegionNode class LegionAdmin(admin.ModelAdmin): def servers(self, obj): return ','.join([s.name for s in obj.nodes.all()]) list_display = ('__unicode__', 'customer', 'servers', 'note') inlines = [ LegionNodeInline ] class TagAdmin(admin.ModelAdmin): list_display = ('__unicode__', 'customer') list_filter = ('customer',) search_fields = ('name',) class FloatingAddressAdmin(admin.ModelAdmin): list_display = ('address', 'mapped_to_server', 'legion', 'customer', 'note') def _user__email(self): if self.user: return self.user.email return '' _user__email.short_description = 
'Email' def _containers__count(self): return self.container_set.count() _containers__count.short_description = 'Containers' class CustomerContainerInline(admin.TabularInline): model = Container fields = ['name', 'server', 'memory', 'storage'] readonly_fields = ['name', 'server', 'memory', 'storage'] extra = 0 can_delete = False max_num = 0 class CustomerAdmin(admin.ModelAdmin): list_display = ('user', _user__email, 'company', 'vat', _containers__count) inlines = [CustomerContainerInline] search_fields = ('user__username', 'user__email', 'company', 'vat', 'admin_note') class NewsAdmin(admin.ModelAdmin): list_display = ('content', 'ctime', 'public') class LoopboxAdminForm(ModelForm): def __init__(self, *args, **kwargs): super(LoopboxAdminForm, self).__init__(*args, **kwargs) if self.instance and self.instance.pk: self.fields['tags'].queryset = Tag.objects.filter(customer=self.instance.container.customer) else: self.fields['tags'].widget = HiddenInput() class LoopboxAdmin(admin.ModelAdmin): list_display = ('container', 'filename', 'mountpoint') form = LoopboxAdminForm class ServerMetadataAdmin(admin.ModelAdmin): def brvalue(self): return self.value.replace('\n', '<br/>').replace(' ', '&nbsp;') brvalue.short_description = 'Value' brvalue.allow_tags = True list_display = ('server', 'metadata', brvalue, 'mtime') list_filter = ('server', 'metadata') class AlarmAdmin(admin.ModelAdmin): list_display = ('container', 'vassal', 'level', 'unix', 'msg') list_filter = ('level',) search_fields = ('msg', '_class', 'vassal', 'color') class CustomDistroAdmin(admin.ModelAdmin): list_display = ('container', 'name', 'path') class PortmapAdmin(admin.ModelAdmin): def _container__server(self): return self.container.server def _container__ip(self): return self.container.ip list_display = (_container__server, 'proto', 'public_port', 'container', _container__ip, 'private_port') admin.site.register(Server, ServerAdmin) admin.site.register(Distro) admin.site.register(Customer, CustomerAdmin) admin.site.register(Container, ContainerAdmin) admin.site.register(Domain, DomainAdmin) admin.site.register(Legion, LegionAdmin) admin.site.register(ContainerLink) admin.site.register(Datacenter) admin.site.register(Tag, TagAdmin) admin.site.register(CustomService) admin.site.register(CustomerAttribute) admin.site.register(Rule) admin.site.register(PrivilegedClient) admin.site.register(FloatingAddress,FloatingAddressAdmin) admin.site.register(NetworkRXContainerMetric,ContainerMetricAdmin) admin.site.register(NetworkTXContainerMetric,ContainerMetricAdmin) admin.site.register(CPUContainerMetric,ContainerMetricAdmin) admin.site.register(MemoryContainerMetric,ContainerMetricAdmin) admin.site.register(MemoryRSSContainerMetric,ContainerMetricAdmin) admin.site.register(MemoryCacheContainerMetric,ContainerMetricAdmin) admin.site.register(IOReadContainerMetric,ContainerMetricAdmin) admin.site.register(IOWriteContainerMetric,ContainerMetricAdmin) admin.site.register(QuotaContainerMetric,ContainerMetricAdmin) admin.site.register(HitsDomainMetric,DomainMetricAdmin) admin.site.register(NetworkRXDomainMetric,DomainMetricAdmin) admin.site.register(NetworkTXDomainMetric,DomainMetricAdmin) admin.site.register(News, NewsAdmin) admin.site.register(Loopbox, LoopboxAdmin) admin.site.register(Alarm, AlarmAdmin) admin.site.register(CustomDistro, CustomDistroAdmin) admin.site.register(Portmap, PortmapAdmin) admin.site.register(ServerFileMetadata) admin.site.register(ServerMetadata, ServerMetadataAdmin)
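
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the admin classes
# above lean on two patterns -- plain functions taking the model instance for
# computed list_display columns (e.g. ServerAdmin.memory_status) and
# SimpleListFilter subclasses such as ContainerAccounted for derived filters.
# The skeleton below shows the same two patterns against a hypothetical
# `Widget` model; the model and its fields are made up for the example.
# ---------------------------------------------------------------------------
from django.contrib import admin


class WidgetEnabledFilter(admin.SimpleListFilter):
    title = 'is enabled ?'
    parameter_name = 'is_enabled'

    def lookups(self, request, model_admin):
        return (('1', 'Yes'), ('0', 'No'))

    def queryset(self, request, queryset):
        if self.value() == '1':
            return queryset.filter(enabled=True)
        elif self.value() == '0':
            return queryset.filter(enabled=False)
        return queryset


class WidgetAdmin(admin.ModelAdmin):
    def usage_status(self):
        # computed column; referenced by name in list_display below
        return "used:%d free:%d" % (self.used, self.total - self.used)

    list_display = ('__unicode__', usage_status)
    list_filter = (WidgetEnabledFilter,)

# admin.site.register(Widget, WidgetAdmin)  # Widget is hypothetical, so left commented out
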
import json import pytest from django.utils import timezone from rest_framework_json_api import serializers, views from rest_framework_json_api.renderers import JSONRenderer from example.models import Author, Blog, Comment, Entry # serializers class RelatedModelSerializer(serializers.ModelSerializer): blog = serializers.ReadOnlyField(source="entry.blog") class Meta: model = Comment fields = ("id", "blog") class DummyTestSerializer(serializers.ModelSerializer): """ This serializer is a simple compound document serializer which includes only a single embedded relation """ related_models = RelatedModelSerializer( source="comments", many=True, read_only=True ) json_field = serializers.SerializerMethodField() def get_json_field(self, entry): return {"JsonKey": "JsonValue"} class Meta: model = Entry fields = ("related_models", "json_field") class JSONAPIMeta: included_resources = ("related_models",) class EntryDRFSerializers(serializers.ModelSerializer): class Meta: model = Entry fields = ("headline", "body_text") read_only_fields = ("tags",) class CommentWithNestedFieldsSerializer(serializers.ModelSerializer): entry = EntryDRFSerializers() class Meta: model = Comment exclude = ("created_at", "modified_at", "author") # fields = ('entry', 'body', 'author',) class AuthorWithNestedFieldsSerializer(serializers.ModelSerializer): comments = CommentWithNestedFieldsSerializer(many=True) class Meta: model = Author fields = ("name", "email", "comments") # views class DummyTestViewSet(views.ModelViewSet): queryset = Entry.objects.all() serializer_class = DummyTestSerializer class ReadOnlyDummyTestViewSet(views.ReadOnlyModelViewSet): queryset = Entry.objects.all() serializer_class = DummyTestSerializer class AuthorWithNestedFieldsViewSet(views.ModelViewSet): queryset = Author.objects.all() serializer_class = AuthorWithNestedFieldsSerializer resource_name = "authors" def render_dummy_test_serialized_view(view_class, instance): serializer = view_class.serializer_class(instance=instance) renderer = JSONRenderer() return renderer.render(serializer.data, renderer_context={"view": view_class()}) def test_simple_reverse_relation_included_renderer(): """ Test renderer when a single reverse fk relation is passed. 
""" rendered = render_dummy_test_serialized_view(DummyTestViewSet, Entry()) assert rendered def test_simple_reverse_relation_included_read_only_viewset(): rendered = render_dummy_test_serialized_view(ReadOnlyDummyTestViewSet, Entry()) assert rendered def test_render_format_field_names(settings): """Test that json field is kept untouched.""" settings.JSON_API_FORMAT_FIELD_NAMES = "dasherize" rendered = render_dummy_test_serialized_view(DummyTestViewSet, Entry()) result = json.loads(rendered.decode()) assert result["data"]["attributes"]["json-field"] == {"JsonKey": "JsonValue"} def test_writeonly_not_in_response(): """Test that writeonly fields are not shown in list response""" class WriteonlyTestSerializer(serializers.ModelSerializer): """Serializer for testing the absence of write_only fields""" comments = serializers.ResourceRelatedField( many=True, write_only=True, queryset=Comment.objects.all() ) rating = serializers.IntegerField(write_only=True) class Meta: model = Entry fields = ("comments", "rating") class WriteOnlyDummyTestViewSet(views.ReadOnlyModelViewSet): queryset = Entry.objects.all() serializer_class = WriteonlyTestSerializer rendered = render_dummy_test_serialized_view(WriteOnlyDummyTestViewSet, Entry()) result = json.loads(rendered.decode()) assert "rating" not in result["data"]["attributes"] assert "relationships" not in result["data"] def test_render_empty_relationship_reverse_lookup(): """Test that empty relationships are rendered as None.""" class EmptyRelationshipSerializer(serializers.ModelSerializer): class Meta: model = Author fields = ("bio",) class EmptyRelationshipViewSet(views.ReadOnlyModelViewSet): queryset = Author.objects.all() serializer_class = EmptyRelationshipSerializer rendered = render_dummy_test_serialized_view(EmptyRelationshipViewSet, Author()) result = json.loads(rendered.decode()) assert "relationships" in result["data"] assert "bio" in result["data"]["relationships"] assert result["data"]["relationships"]["bio"] == {"data": None} @pytest.mark.django_db def test_extract_relation_instance(comment): serializer = RelatedModelSerializer(instance=comment) got = JSONRenderer.extract_relation_instance( field=serializer.fields["blog"], resource_instance=comment ) assert got == comment.entry.blog def test_render_serializer_as_attribute(db): # setting up blog = Blog.objects.create(name="Some Blog", tagline="It's a blog") entry = Entry.objects.create( blog=blog, headline="headline", body_text="body_text", pub_date=timezone.now(), mod_date=timezone.now(), n_comments=0, n_pingbacks=0, rating=3, ) author = Author.objects.create(name="some_author", email="[email protected]") entry.authors.add(author) Comment.objects.create( entry=entry, body="testing one two three", author=Author.objects.first() ) rendered = render_dummy_test_serialized_view(AuthorWithNestedFieldsViewSet, author) result = json.loads(rendered.decode()) expected = { "data": { "type": "authors", "id": "1", "attributes": { "name": "some_author", "email": "[email protected]", "comments": [ { "id": 1, "entry": { "headline": "headline", "body_text": "body_text", }, "body": "testing one two three", } ], }, } } assert expected == result
# -*- coding: utf-8 -*- import collections import glob import json import os import sys # thirdparty libraies import click import requests # This package import ar_too as at from ar_too.exceptions import UnknownArtifactoryRestError # "CONSTANTS" FETCH_WHAT_HELP_STR = """ What type of configs you want to fetch. current options are repos (which takes optional --include_defaults and --include_filter arguements """ FETCH_REPO_TYPE_HELP_STR = """Artifactory repo type. One of LOCAL, REMOTE, VIRTUAL. If not given, all will be retreived.""" def _get_ldap_dict(ldap_json): """ return an OrderedDict for the given json file Parameters ---------- ldap_json : string filepath to json file with config options to be loaded Returns ------- ldap_dict : collections.OrderedDict ordered dictionary for use in configuring artifactory """ try: with click.open_file(ldap_json) as f: json_dict = json.load(f, object_pairs_hook=collections.OrderedDict) except: click.echo("Can't open that LDAP json file") raise return json_dict def _config_ldap(url, username, password, ldap_json): """ _config_ldap gets the current configuration and a json file, and update the config if necessary Parameters ---------- url : string url for artifactory server username : string admin username on artifactory server password : string password for admin user ldap_json : filepath to json file the represents the ldap dictionary """ auth = (username, password) current_conf = at.get_artifactory_config_from_url(url, auth) ldap_dict = _get_ldap_dict(ldap_json) new_conf, changed = at.update_ldapSettings_from_dict(current_conf, ldap_dict) if changed: click.echo("Modifying LDAP settings...") success = at.update_artifactory_config(url, auth, new_conf) else: click.echo("LDAP settings unchanged.") success = True if not success: click.echo("Something went wrong") sys.exit(1) def _config_repos(url, username, password, repo_dir): """ for each file in the directory, create or update that repo Each file should be a json file of the format https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON Parameters ---------- url : string url for artifactory server username : string admin username on artifactory server password : string password for admin user repo_dir : string path to a directory with repository config json files Notes ----- This function will organize the repositories in two groups: first, local and remote repos second, virtual repos. This is because virtual repos aggregate local and remote repos and thus the locals and remotes must be present before we create the virtuals """ repos_list_dict = _get_repos_from_directory(repo_dir) ses = requests.Session() ses.auth = (username, password) for rclass in ['local', 'remote', 'virtual']: for repo_dict in repos_list_dict[rclass]: success = at.cr_repository(url, repo_dict, session=ses) if success: click.echo("Successfully updated {}".format(repo_dict['key'])) else: click.echo("Failed updating {}".format(repo_dict['key'])) def _get_repos_from_directory(repo_dir): """ return a dictionary of lists with 3 keys: local, remote, virtual. Each item of the list will be a dictionary representing one of these: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON Parameters ---------- repo_dir : string path to a directory with repository config json files. 
see above Returns ------- repos_list_dict : dictionary see description above Notes ----- This will ONLY find .json files """ if not os.path.isdir(repo_dir): click.echo("{} is not a directory.".format(repo_dir)) sys.exit(1) repos_list_dict = { "local": [], "remote": [], "virtual": [] } for jfile in glob.glob('{}/*.json'.format(repo_dir)): with click.open_file(jfile) as f: jdict = json.loads(f.read()) try: rclass = jdict['rclass'] repos_list_dict[rclass].append(jdict) except KeyError: click.echo("file {} as no rclass key.".format(jfile)) click.echo("Skipping.") return repos_list_dict def _config_admin_pass(host_url, password, target_password): """ set the admin password for Artifactory Parameters ---------- host_url : string url for artifactory server, including the /artifactory context password : string password for admin user target_password : string desired password for admin user """ try: changed = at.update_password( host_url, 'admin', password, target_password ) except UnknownArtifactoryRestError as ae: click.echo("Failed to update password.") click.echo(ae.msg) raise except: click.echo("Failed to update password for reasons unknown.") raise if changed: click.echo("Password successfully changed") else: click.echo("Password already at target") def _fetch_repos(host_url, username, password, inc_defaults, inc_filter, output_dir, repo_type): """ download json configurations for repos, place them in output dir Parameters ---------- host_url : string url for artifactory server, including the /artifactory context username : string admin username on artifactory server password : string password for admin user inc_defaults : boolean Whether we should include the repos artifactory ships with inc_filter : string Only include repos whose key includes this filter output_dir : string directory to write json files to repo_type : string One of the 3 artifactory repo types (LOCAL, REMOTE, VIRTUAL) """ if repo_type is not None: repo_type = repo_type.upper() if repo_type not in ["LOCAL", "REMOTE", "VIRTUAL"]: click.echo("repo_type must be one of LOCAL, REMOTE, VIRTUAL") sys.exit(1) else: repo_type = "ALL" if not os.path.isdir(output_dir): click.echo("Can't find target directory. Exiting") sys.exit(1) repo_obj_list = at.get_repo_list( host_url, repo_type=repo_type, include_defaults=inc_defaults, include_filter=inc_filter ) if len(repo_obj_list) == 0: click.echo("No repos found. 
Check your options") sys.exit(1) repo_list = [r['key'] for r in repo_obj_list] repo_config_list = at.get_repo_configs( host_url, repo_list, username=username, passwd=password ) for repo in repo_config_list: repo_conf_file = os.path.join(output_dir, '{}.json'.format(repo['key'])) with open(repo_conf_file, 'w') as f: f.write(json.dumps(repo, indent=4)) @click.group() @click.option('--username', help="username with admin privileges") @click.option('--password', help="password for user") @click.option('--url', help="url and port for the artifactotry server") @click.pass_context def cli(ctx, **kwargs): """ Main entrypoint for ar_too cli """ ctx.obj = kwargs @cli.group() @click.pass_context def fetch(ctx, **kwargs): """ fetch command group """ ctx.obj.update(kwargs) @fetch.command() @click.option('--include_defaults', is_flag=True, default=False, help="include artifactory default repos with the repos arg") @click.option('--include_filter', help="only repos that include this in their key will be fetched") @click.option('--output_dir', default=os.getcwd(), help="directory to place files") @click.option('--repo_type', help=FETCH_REPO_TYPE_HELP_STR) @click.pass_context def repos(ctx, **kwargs): """ commands for retreiving configs from artifactory """ ctx.obj.update(kwargs) _fetch_repos( ctx.obj['url'], ctx.obj['username'], ctx.obj['password'], ctx.obj['include_defaults'], ctx.obj['include_filter'], ctx.obj['output_dir'], ctx.obj['repo_type'] ) @cli.command() @click.option('--ldap_json', help="json file for ldap settings") @click.option('--repos_dir', help="Dir with repository configuration files") @click.option('--admin_pass', help="set new admin password to this") @click.pass_context def configure(ctx, **kwargs): """ command(s) for configuring artifactory """ ctx.obj.update(kwargs) if ctx.obj['ldap_json'] is not None: _config_ldap( ctx.obj['url'], ctx.obj['username'], ctx.obj['password'], ctx.obj['ldap_json'] ) if ctx.obj['repos_dir'] is not None: _config_repos( ctx.obj['url'], ctx.obj['username'], ctx.obj['password'], ctx.obj['repos_dir'] ) if ctx.obj['admin_pass'] is not None: if ctx.obj['username'] != 'admin': click.echo("Must use the admin user to update the admin user") sys.exit(1) _config_admin_pass( ctx.obj['url'], ctx.obj['password'], ctx.obj['admin_pass'] )
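
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): because the
# entrypoints above are click commands, they can be exercised without a shell
# via click.testing.CliRunner. The URL and credentials below are placeholders,
# and `--repo_type LOCAL` just mirrors one of the options documented above; a
# real run would also need a reachable Artifactory instance.
# ---------------------------------------------------------------------------
from click.testing import CliRunner


def invoke_fetch_repos_example():
    runner = CliRunner()
    result = runner.invoke(cli, [
        '--username', 'admin',
        '--password', 'password',            # placeholder credentials
        '--url', 'http://localhost:8081/artifactory',
        'fetch', 'repos',
        '--repo_type', 'LOCAL',
        '--output_dir', '/tmp',
    ])
    print(result.output)
    return result.exit_code
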
#!/usr/bin/env python import os import sys import numpy as np import nibabel as nb import argparse import mayavi.mlab as mlab from scai_utils import * COORD_FILE = "/home/cais/STUT/FSDATA/fsaverage2/mri/aparc12_roi_coords.txt" STRUCT_VOL = "/home/cais/STUT/FSDATA/fsaverage2/mri/brain.nii.gz" # STRUCT_VOL = "/home/cais/STUT/FSDATA/MNI152_1mm/mri/brain.nii.gz" # --- To convert into input --- # # componentFile = "corrSSI4_sigComponents_1.txt" # edges = [[0, 1], [1, 2], [2, 3], [3, 0], [1, 3]] DEFAULT_OPACITY=1.0 COMPONENT_CLRS = [(1.0, 0.75, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)] def translate_roi_name(roiName): if roiName.count("_Hg") > 0: return roiName.replace("_Hg", "_H") elif roiName.count("_aCGg") > 0: return roiName.replace("_aCGg", "_aCG") else: return roiName if __name__ == "__main__": ap = argparse.ArgumentParser(description="Render 3D network component image based on an input component file (from analyze_pt2_seedOnly_cmat.m. The thickness of the tubes are proportional to the sig value in the component file") ap.add_argument("componentFile", type=str) ap.add_argument("outputImgFN", type=str) ap.add_argument("--hemi", type=str, default="lh") ap.add_argument("--opacity", dest="t_opacity", \ type=float, default=DEFAULT_OPACITY, \ help="Opacity of 3D tubes (default=%f)" % DEFAULT_OPACITY) ap.add_argument("--noText", dest="bNoText", action="store_true", \ help="Do not plot the 3D text") ap.add_argument("--struct-vol", dest="altStructVol", type=str, help="Specify STRUCT_VOL that differs from the default: %s"% STRUCT_VOL) ap.add_argument("--coord-file", dest="altCoordFile", type=str, help="Specify coordinates file that differs from the default: %s" % COORD_FILE) ap.add_argument("--translate-roi-name", dest="bTranslateROIName", action="store_true", help="Translate ROI names for old-version of aparc12") ap.add_argument("--no-edges", dest="bNoEdges", help="Do not render the edges of the network") ap.add_argument("--vmax", dest="vmax", type=float, default=1600, help="vmax for brain volume rendering (default: 1600)") ap.add_argument("--cross-only", dest="bCrossOnly", action="store_true", help="Draw only cross-hemisphere connections (for hemi == bh or xh only)") if len(sys.argv) == 1: ap.print_help() sys.exit(1) # === Parse input arguments === # args = ap.parse_args() componentFile = args.componentFile outputImgFN = args.outputImgFN hemi = args.hemi t_opacity = args.t_opacity bNoText = args.bNoText if args.bCrossOnly and not (hemi == "bh" or hemi == "xh"): error_log("--cross-only used with hemi other than bh or xh") if not (hemi == "lh" or hemi == "rh" or hemi == "bh" or hemi == "xh"): raise Exception, "Unexpected hemi: %s" % hemi if args.altStructVol != None: STRUCT_VOL = args.altStructVol if args.altCoordFile != None: COORD_FILE = args.altCoordFile # print(hemi) # DEBUG # === Load the structural image === # check_file(STRUCT_VOL) sImg = nb.load(STRUCT_VOL) sImgDat = sImg.get_data() # DEBUG # sImgDat = sImgDat[0 : 4 : 256, 0 : 4: 256, 0 : 4: 256] # === Downsample === # D = 2 N = len(sImgDat) / D sImgDat1 = np.zeros([N, N, N]) for i0 in range(0, N): for i1 in range(0, N): for i2 in range(0, N): sImgDat1[i0, i1, i2] = sImgDat[i0 * D, i1 * D, i2 * D] sImgDat = sImgDat1; #sInds = np.mgrid[0 : sImg.shape[0], 0 : sImg.shape[1], 0 : sImg.shape[2]] sInds = np.mgrid[0 : len(sImgDat), 0 : len(sImgDat[0]), 0 : len(sImgDat[0][0])] imgMin = np.min(sImgDat) imgMax = np.max(sImgDat) print("Image min = %f" % imgMin) print("Image min = %f" % imgMax) # mlab.figure(size=(600, 450)) # === Read coordinates === # 
check_file(COORD_FILE) cf = open(COORD_FILE, "rt") ct = cf.read().split('\n') ct = remove_empty_strings(ct) cf.close() roi_names = [] roi_nums = [] roi_coords = [] for (i0, tline) in enumerate(ct): t_items = tline.split(' ') if len(t_items) != 5: raise Exception, "Unrecognized formant in a line of %s: %s" \ % (COORD_FILE, tline) roi_names.append(t_items[0]) roi_nums.append(t_items[1]) t_coord = [float(t_items[2]), float(t_items[3]), float(t_items[4])] roi_coords.append(t_coord) assert(len(roi_names) == len(roi_nums)) assert(len(roi_names) == len(roi_coords)) # === Read component file === # check_file(componentFile) compf = open(componentFile, "rt") compt = compf.read() compf.close() compt = remove_empty_strings(compt.split('\n')) edges = [] compNums = [] # Component number sigVals = [] for (i0, tline) in enumerate(compt): assert(tline.count(" - ") == 1) assert(tline.count(": sig=") == 1) linkName = tline.split(": sig=")[0] roi1Name = linkName.split(" - ")[0] roi2Name = linkName.split(" - ")[1] if hemi == "xh" or hemi == "bh": hemi1 = roi1Name.split("_")[0] hemi2 = roi2Name.split("_")[0] if args.bCrossOnly and (hemi1 == hemi2): print("Skipping same-hemisphere projection: %s --> %s" % (roi1Name, roi2Name)) continue if tline.count(", ") == 1: compNums.append(int(tline.split(", ")[1])) tline = tline.split(", ")[0] else: compNums.append(1) sigVal = np.abs(float(tline.split(": sig=")[1])) if args.bTranslateROIName: roi1Name = translate_roi_name(roi1Name) roi2Name = translate_roi_name(roi2Name) roi1Num = roi_names.index(roi1Name) roi2Num = roi_names.index(roi2Name) edges.append([roi1Num, roi2Num]) sigVals.append(sigVal) assert(len(edges) == len(sigVals)) assert(len(edges) == len(compNums)) # === Call mayavi for 3d drawing === # # edges = edges[:4] # DEBUG labPlotted = [0] * len(roi_names) # mlab.plot3d([0, 100], [0, 100], [0, 100], tube_radius=2.5) # === Plot the "axes" === # """ mlab.plot3d([-128, 128], [-128, -128], [-128, -128], \ color=(1, 0, 0), tube_radius=2) mlab.plot3d([-128, 128], [128, 128], [-128, -128], \ color=(1, 1, 0), tube_radius=2) mlab.plot3d([-128, 128], [-128, -128], [128, 128], \ color=(0, 1, 0), tube_radius=2) mlab.plot3d([-128, 128], [128, 128], [128, 128], \ color=(0, 1, 0), tube_radius=2) mlab.plot3d([-128, -128], [-128, 128], [-128, -128], \ color=(0, 0, 1), tube_radius=2) mlab.plot3d([128, 128], [-128, 128], [-128, -128], \ color=(0, 1, 1), tube_radius=2) """ mlab.figure(size=(800, 600), bgcolor=(1.0, 1.0, 1.0)) # Coordinates: (in increasing coordinate value) # Dimension 1: left to right (In image: R to L: Need - ) # Dimension 2: superior to inferior (In image: S to I: Okay ) # Dimension 3: anterior to posterior (In image: P to A: Need - ) # Green: anterior # Red: superior # Blue: right # mlab.show() #""" for (i0, tlink) in enumerate(edges): t_x = np.array([roi_coords[tlink[0]][0], \ roi_coords[tlink[1]][0]]) / D - N / 2 t_x = -t_x t_y = np.array([roi_coords[tlink[0]][1], \ roi_coords[tlink[1]][1]]) / D- N / 2 t_z = np.array([roi_coords[tlink[0]][2], \ roi_coords[tlink[1]][2]]) / D - N / 2 mlab.plot3d(t_x, t_y, t_z, tube_radius=sigVals[i0] * 0.3 / D, \ color=COMPONENT_CLRS[compNums[i0] - 1], \ opacity=t_opacity) # mlab.plot3d(t_x, t_y, t_z, tube_radius=1) for j in range(2): if labPlotted[tlink[j]] == 0: mlab.points3d(t_x[j], t_y[j], t_z[j], \ scale_factor = 2.0 / D, \ color=COMPONENT_CLRS[compNums[i0] - 1], \ opacity=t_opacity) if hemi == "lh" or hemi == "rh": rName = roi_names[tlink[j]].replace("lh_", "")\ .replace("rh_", "") else: rName = 
roi_names[tlink[j]].replace("lh_", "L ")\ .replace("rh_", "R ") if not bNoText: mlab.text3d(t_x[j], t_y[j], t_z[j], \ rName, \ color=(0, 0, 0), scale=2.0 / D) labPlotted[tlink[j]] = 1 #""" # points3d(t_x, t_y, t_z) # mlab.roll(-90) # mlab.pitch(90) # mlab.move([600, 600, 400]) #""" src = mlab.pipeline.scalar_field((sInds[0] - N / 2), \ (sInds[1] - N / 2), \ (sInds[2] - N / 2), \ sImgDat) # mlab.pipeline.volume(src, vmin=10, vmax=1600, # color=(1.0, 1.0, 1.0)) mlab.pipeline.volume(src, vmin=10, vmax=args.vmax, color=(1.0, 1.0, 1.0)) #""" if hemi == "lh": mlab.view(azimuth=210, elevation=100, roll=180, \ focalpoint=[0, 0, 0], distance=220 / D) elif hemi == "rh": mlab.view(azimuth=145, elevation=-80, roll=180, \ focalpoint=[0, 0, 0], distance=220 / D) elif hemi == "xh" or hemi == "bh": mlab.view(azimuth=90, elevation=10, roll=0, \ focalpoint=[0, 0, 0], distance=220 / D) #cam,foc = mlab.move() #print(cam) #print(foc) # view=mlab.view() # print(view) print("Rendering done.") # outputImgFN = "netw_component.png" os.system("rm -f %s" % outputImgFN) #""" print("Saving image file ...") mlab.savefig(outputImgFN) check_file(outputImgFN) print("Image file saved at: %s" % outputImgFN) #""" mlab.show()
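
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the component-file
# parser above expects one link per line in the form
#     <roi1> - <roi2>: sig=<value>[, <component number>]
# The standalone snippet below applies the same splits (" - ", ": sig=", ", ")
# to a couple of made-up lines, so the expected format can be checked without
# FreeSurfer data, mayavi, or the coordinate file. The ROI names are invented
# for the example.
# ---------------------------------------------------------------------------
import numpy as np

example_lines = [
    "lh_vPMC - lh_aINS: sig=2.35, 1",
    "rh_H - rh_aCG: sig=1.10",
]

for tline in example_lines:
    link_name, sig_part = tline.split(": sig=")
    roi1_name, roi2_name = link_name.split(" - ")
    if ", " in sig_part:
        sig_str, comp_str = sig_part.split(", ")
        comp_num = int(comp_str)
    else:
        sig_str, comp_num = sig_part, 1       # default component, as in the script
    sig_val = np.abs(float(sig_str))
    print("%s -> %s: sig=%.2f, component=%d" % (roi1_name, roi2_name, sig_val, comp_num))
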
# coding: utf-8 """ ORCID Member No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: Latest Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class Funding(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, type=None, organization_defined_type=None, title=None, short_description=None, amount=None, url=None, start_date=None, end_date=None, external_ids=None, contributors=None, organization=None, visibility=None): """ Funding - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'created_date': 'CreatedDate', 'last_modified_date': 'LastModifiedDate', 'source': 'Source', 'put_code': 'int', 'path': 'str', 'type': 'str', 'organization_defined_type': 'OrganizationDefinedFundingSubType', 'title': 'FundingTitle', 'short_description': 'str', 'amount': 'Amount', 'url': 'Url', 'start_date': 'FuzzyDate', 'end_date': 'FuzzyDate', 'external_ids': 'ExternalIDs', 'contributors': 'FundingContributors', 'organization': 'Organization', 'visibility': 'str' } self.attribute_map = { 'created_date': 'created-date', 'last_modified_date': 'last-modified-date', 'source': 'source', 'put_code': 'put-code', 'path': 'path', 'type': 'type', 'organization_defined_type': 'organization-defined-type', 'title': 'title', 'short_description': 'short-description', 'amount': 'amount', 'url': 'url', 'start_date': 'start-date', 'end_date': 'end-date', 'external_ids': 'external-ids', 'contributors': 'contributors', 'organization': 'organization', 'visibility': 'visibility' } self._created_date = created_date self._last_modified_date = last_modified_date self._source = source self._put_code = put_code self._path = path self._type = type self._organization_defined_type = organization_defined_type self._title = title self._short_description = short_description self._amount = amount self._url = url self._start_date = start_date self._end_date = end_date self._external_ids = external_ids self._contributors = contributors self._organization = organization self._visibility = visibility @property def created_date(self): """ Gets the created_date of this Funding. :return: The created_date of this Funding. :rtype: CreatedDate """ return self._created_date @created_date.setter def created_date(self, created_date): """ Sets the created_date of this Funding. :param created_date: The created_date of this Funding. :type: CreatedDate """ self._created_date = created_date @property def last_modified_date(self): """ Gets the last_modified_date of this Funding. :return: The last_modified_date of this Funding. :rtype: LastModifiedDate """ return self._last_modified_date @last_modified_date.setter def last_modified_date(self, last_modified_date): """ Sets the last_modified_date of this Funding. :param last_modified_date: The last_modified_date of this Funding. :type: LastModifiedDate """ self._last_modified_date = last_modified_date @property def source(self): """ Gets the source of this Funding. :return: The source of this Funding. :rtype: Source """ return self._source @source.setter def source(self, source): """ Sets the source of this Funding. 
:param source: The source of this Funding. :type: Source """ self._source = source @property def put_code(self): """ Gets the put_code of this Funding. :return: The put_code of this Funding. :rtype: int """ return self._put_code @put_code.setter def put_code(self, put_code): """ Sets the put_code of this Funding. :param put_code: The put_code of this Funding. :type: int """ self._put_code = put_code @property def path(self): """ Gets the path of this Funding. :return: The path of this Funding. :rtype: str """ return self._path @path.setter def path(self, path): """ Sets the path of this Funding. :param path: The path of this Funding. :type: str """ self._path = path @property def type(self): """ Gets the type of this Funding. :return: The type of this Funding. :rtype: str """ return self._type @type.setter def type(self, type): """ Sets the type of this Funding. :param type: The type of this Funding. :type: str """ allowed_values = ["GRANT", "CONTRACT", "AWARD", "SALARY_AWARD"] if type not in allowed_values: raise ValueError( "Invalid value for `type` ({0}), must be one of {1}" .format(type, allowed_values) ) self._type = type @property def organization_defined_type(self): """ Gets the organization_defined_type of this Funding. :return: The organization_defined_type of this Funding. :rtype: OrganizationDefinedFundingSubType """ return self._organization_defined_type @organization_defined_type.setter def organization_defined_type(self, organization_defined_type): """ Sets the organization_defined_type of this Funding. :param organization_defined_type: The organization_defined_type of this Funding. :type: OrganizationDefinedFundingSubType """ self._organization_defined_type = organization_defined_type @property def title(self): """ Gets the title of this Funding. :return: The title of this Funding. :rtype: FundingTitle """ return self._title @title.setter def title(self, title): """ Sets the title of this Funding. :param title: The title of this Funding. :type: FundingTitle """ if title is None: raise ValueError("Invalid value for `title`, must not be `None`") self._title = title @property def short_description(self): """ Gets the short_description of this Funding. :return: The short_description of this Funding. :rtype: str """ return self._short_description @short_description.setter def short_description(self, short_description): """ Sets the short_description of this Funding. :param short_description: The short_description of this Funding. :type: str """ self._short_description = short_description @property def amount(self): """ Gets the amount of this Funding. :return: The amount of this Funding. :rtype: Amount """ return self._amount @amount.setter def amount(self, amount): """ Sets the amount of this Funding. :param amount: The amount of this Funding. :type: Amount """ self._amount = amount @property def url(self): """ Gets the url of this Funding. :return: The url of this Funding. :rtype: Url """ return self._url @url.setter def url(self, url): """ Sets the url of this Funding. :param url: The url of this Funding. :type: Url """ self._url = url @property def start_date(self): """ Gets the start_date of this Funding. :return: The start_date of this Funding. :rtype: FuzzyDate """ return self._start_date @start_date.setter def start_date(self, start_date): """ Sets the start_date of this Funding. :param start_date: The start_date of this Funding. :type: FuzzyDate """ self._start_date = start_date @property def end_date(self): """ Gets the end_date of this Funding. 
:return: The end_date of this Funding. :rtype: FuzzyDate """ return self._end_date @end_date.setter def end_date(self, end_date): """ Sets the end_date of this Funding. :param end_date: The end_date of this Funding. :type: FuzzyDate """ self._end_date = end_date @property def external_ids(self): """ Gets the external_ids of this Funding. :return: The external_ids of this Funding. :rtype: ExternalIDs """ return self._external_ids @external_ids.setter def external_ids(self, external_ids): """ Sets the external_ids of this Funding. :param external_ids: The external_ids of this Funding. :type: ExternalIDs """ self._external_ids = external_ids @property def contributors(self): """ Gets the contributors of this Funding. :return: The contributors of this Funding. :rtype: FundingContributors """ return self._contributors @contributors.setter def contributors(self, contributors): """ Sets the contributors of this Funding. :param contributors: The contributors of this Funding. :type: FundingContributors """ self._contributors = contributors @property def organization(self): """ Gets the organization of this Funding. :return: The organization of this Funding. :rtype: Organization """ return self._organization @organization.setter def organization(self, organization): """ Sets the organization of this Funding. :param organization: The organization of this Funding. :type: Organization """ if organization is None: raise ValueError("Invalid value for `organization`, must not be `None`") self._organization = organization @property def visibility(self): """ Gets the visibility of this Funding. :return: The visibility of this Funding. :rtype: str """ return self._visibility @visibility.setter def visibility(self, visibility): """ Sets the visibility of this Funding. :param visibility: The visibility of this Funding. :type: str """ allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] if visibility not in allowed_values: raise ValueError( "Invalid value for `visibility` ({0}), must be one of {1}" .format(visibility, allowed_values) ) self._visibility = visibility def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, Funding): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
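# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated client, added for
# illustration): it shows the setter-side validation that `type` and
# `visibility` enforce, and what to_dict() returns. The plain strings used for
# `title` and `organization` are placeholders; in the real client those
# attributes hold FundingTitle and Organization model instances.
if __name__ == "__main__":
    funding = Funding()
    funding.title = "Example grant title"        # setter rejects None
    funding.organization = "Example University"  # setter rejects None
    funding.type = "GRANT"         # one of GRANT, CONTRACT, AWARD, SALARY_AWARD
    funding.visibility = "PUBLIC"  # one of LIMITED, REGISTERED_ONLY, PUBLIC, PRIVATE

    # to_dict() walks swagger_types and serialises nested models recursively;
    # here every nested value is either None or a plain string.
    print(funding.to_dict())

    # Values outside the allowed lists raise ValueError, e.g.
    # funding.type = "LOAN"  ->  ValueError: Invalid value for `type` ...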
""" Query subclasses which provide extra functionality beyond simple data retrieval. """ from django.conf import settings from django.core.exceptions import FieldError from django.db import connections from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, SelectInfo from django.db.models.sql.datastructures import Date, DateTime from django.db.models.sql.query import Query from django.db.models.sql.where import AND, Constraint from django.utils import six from django.utils import timezone __all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery', 'DateTimeQuery', 'AggregateQuery'] class DeleteQuery(Query): """ Delete queries are done through this class, since they are more constrained than general queries. """ compiler = 'SQLDeleteCompiler' def do_query(self, table, where, using): self.tables = [table] self.where = where self.get_compiler(using).execute_sql(None) def delete_batch(self, pk_list, using, field=None): """ Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. """ if not field: field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): where = self.where_class() where.add((Constraint(None, field.column, field), 'in', pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND) self.do_query(self.get_meta().db_table, where, using=using) def delete_qs(self, query, using): """ Delete the queryset in one SQL query (if possible). For simple queries this is done by copying the query.query.where to self.query, for complex queries by using subquery. """ innerq = query.query # Make sure the inner query has at least one table in use. innerq.get_initial_alias() # The same for our new query. self.get_initial_alias() innerq_used_tables = [t for t in innerq.tables if innerq.alias_refcount[t]] if ((not innerq_used_tables or innerq_used_tables == self.tables) and not len(innerq.having)): # There is only the base table in use in the query, and there are # no aggregate filtering going on. self.where = innerq.where else: pk = query.model._meta.pk if not connections[using].features.update_can_self_select: # We can't do the delete using subquery. values = list(query.values_list('pk', flat=True)) if not values: return self.delete_batch(values, using) return else: innerq.clear_select_clause() innerq.select = [ SelectInfo((self.get_initial_alias(), pk.column), None) ] values = innerq where = self.where_class() where.add((Constraint(None, pk.column, pk), 'in', values), AND) self.where = where self.get_compiler(using).execute_sql(None) class UpdateQuery(Query): """ Represents an "update" SQL query. """ compiler = 'SQLUpdateCompiler' def __init__(self, *args, **kwargs): super(UpdateQuery, self).__init__(*args, **kwargs) self._setup_query() def _setup_query(self): """ Runs on initialization and after cloning. Any attributes that would normally be set in __init__ should go in here, instead, so that they are also set up after a clone() call. 
""" self.values = [] self.related_ids = None if not hasattr(self, 'related_updates'): self.related_updates = {} def clone(self, klass=None, **kwargs): return super(UpdateQuery, self).clone(klass, related_updates=self.related_updates.copy(), **kwargs) def update_batch(self, pk_list, values, using): pk_field = self.get_meta().pk self.add_update_values(values) for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.where = self.where_class() self.where.add((Constraint(None, pk_field.column, pk_field), 'in', pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND) self.get_compiler(using).execute_sql(None) def add_update_values(self, values): """ Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets. """ values_seq = [] for name, val in six.iteritems(values): field, model, direct, m2m = self.get_meta().get_field_by_name(name) if not direct or m2m: raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field) if model: self.add_related_update(model, field, val) continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq) def add_update_fields(self, values_seq): """ Turn a sequence of (field, model, value) triples into an update query. Used by add_update_values() as well as the "fast" update path when saving models. """ self.values.extend(values_seq) def add_related_update(self, model, field, value): """ Adds (name, value) to an update query for an ancestor model. Updates are coalesced so that we only run one update query per ancestor. """ try: self.related_updates[model].append((field, None, value)) except KeyError: self.related_updates[model] = [(field, None, value)] def get_related_updates(self): """ Returns a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table. """ if not self.related_updates: return [] result = [] for model, values in six.iteritems(self.related_updates): query = UpdateQuery(model) query.values = values if self.related_ids is not None: query.add_filter(('pk__in', self.related_ids)) result.append(query) return result class InsertQuery(Query): compiler = 'SQLInsertCompiler' def __init__(self, *args, **kwargs): super(InsertQuery, self).__init__(*args, **kwargs) self.fields = [] self.objs = [] def clone(self, klass=None, **kwargs): extras = { 'fields': self.fields[:], 'objs': self.objs[:], 'raw': self.raw, } extras.update(kwargs) return super(InsertQuery, self).clone(klass, **extras) def insert_values(self, fields, objs, raw=False): """ Set up the insert query from the 'insert_values' dictionary. The dictionary gives the model field names and their target values. If 'raw_values' is True, the values in the 'insert_values' dictionary are inserted directly into the query, rather than passed as SQL parameters. This provides a way to insert NULL and DEFAULT keywords into the query, for example. """ self.fields = fields self.objs = objs self.raw = raw class DateQuery(Query): """ A DateQuery is a normal query, except that it specifically selects a single date field. This requires some special handling when converting the results back to Python objects, so we put it in a separate class. """ compiler = 'SQLDateCompiler' def add_select(self, field_name, lookup_type, order='ASC'): """ Converts the query into an extraction query. 
""" try: result = self.setup_joins( field_name.split(LOOKUP_SEP), self.get_meta(), self.get_initial_alias(), ) except FieldError: raise FieldDoesNotExist("%s has no field named '%s'" % ( self.get_meta().object_name, field_name )) field = result[0] self._check_field(field) # overridden in DateTimeQuery alias = result[3][-1] select = self._get_select((alias, field.column), lookup_type) self.clear_select_clause() self.select = [SelectInfo(select, None)] self.distinct = True self.order_by = [1] if order == 'ASC' else [-1] if field.null: self.add_filter(("%s__isnull" % field_name, False)) def _check_field(self, field): assert isinstance(field, DateField), \ "%r isn't a DateField." % field.name if settings.USE_TZ: assert not isinstance(field, DateTimeField), \ "%r is a DateTimeField, not a DateField." % field.name def _get_select(self, col, lookup_type): return Date(col, lookup_type) class DateTimeQuery(DateQuery): """ A DateTimeQuery is like a DateQuery but for a datetime field. If time zone support is active, the tzinfo attribute contains the time zone to use for converting the values before truncating them. Otherwise it's set to None. """ compiler = 'SQLDateTimeCompiler' def _check_field(self, field): assert isinstance(field, DateTimeField), \ "%r isn't a DateTimeField." % field.name def _get_select(self, col, lookup_type): if self.tzinfo is None: tzname = None else: tzname = timezone._get_timezone_name(self.tzinfo) return DateTime(col, lookup_type, tzname) class AggregateQuery(Query): """ An AggregateQuery takes another query as a parameter to the FROM clause and only selects the elements in the provided list. """ compiler = 'SQLAggregateCompiler' def add_subquery(self, query, using): self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
from __future__ import absolute_import, division, print_function import os import absl.flags FLAGS = absl.flags.FLAGS # sphinx-doc: training_ref_flags_start def create_flags(): # Importer # ======== f = absl.flags f.DEFINE_string('train_files', '', 'comma separated list of files specifying the dataset used for training. Multiple files will get merged. If empty, training will not be run.') f.DEFINE_string('dev_files', '', 'comma separated list of files specifying the dataset used for validation. Multiple files will get merged. If empty, validation will not be run.') f.DEFINE_string('test_files', '', 'comma separated list of files specifying the dataset used for testing. Multiple files will get merged. If empty, the model will not be tested.') f.DEFINE_string('read_buffer', '1MB', 'buffer-size for reading samples from datasets (supports file-size suffixes KB, MB, GB, TB)') f.DEFINE_string('feature_cache', '', 'cache MFCC features to disk to speed up future training runs on the same data. This flag specifies the path where cached features extracted from --train_files will be saved. If empty, or if online augmentation flags are enabled, caching will be disabled.') f.DEFINE_integer('feature_win_len', 32, 'feature extraction audio window length in milliseconds') f.DEFINE_integer('feature_win_step', 20, 'feature extraction window step length in milliseconds') f.DEFINE_integer('audio_sample_rate', 16000, 'sample rate value expected by model') f.DEFINE_boolean('skip_if_existed', False, 'skip if output existed') # Data Augmentation # ================ f.DEFINE_float('data_aug_features_additive', 0, 'std of the Gaussian additive noise') f.DEFINE_float('data_aug_features_multiplicative', 0, 'std of normal distribution around 1 for multiplicative noise') f.DEFINE_float('augmentation_spec_dropout_keeprate', 1, 'keep rate of dropout augmentation on spectrogram (if 1, no dropout will be performed on spectrogram)') f.DEFINE_boolean('augmentation_sparse_warp', False, 'whether to use spectrogram sparse warp. USE OF THIS FLAG IS UNSUPPORTED, enable sparse warp will increase training time drastically, and the paper also mentioned that this is not a major factor to improve accuracy.') f.DEFINE_integer('augmentation_sparse_warp_num_control_points', 1, 'specify number of control points') f.DEFINE_integer('augmentation_sparse_warp_time_warping_para', 20, 'time_warping_para') f.DEFINE_integer('augmentation_sparse_warp_interpolation_order', 2, 'sparse_warp_interpolation_order') f.DEFINE_float('augmentation_sparse_warp_regularization_weight', 0.0, 'sparse_warp_regularization_weight') f.DEFINE_integer('augmentation_sparse_warp_num_boundary_points', 1, 'sparse_warp_num_boundary_points') f.DEFINE_boolean('augmentation_freq_and_time_masking', False, 'whether to use frequency and time masking augmentation') f.DEFINE_integer('augmentation_freq_and_time_masking_freq_mask_range', 5, 'max range of masks in the frequency domain when performing freqtime-mask augmentation') f.DEFINE_integer('augmentation_freq_and_time_masking_number_freq_masks', 3, 'number of masks in the frequency domain when performing freqtime-mask augmentation') f.DEFINE_integer('augmentation_freq_and_time_masking_time_mask_range', 2, 'max range of masks in the time domain when performing freqtime-mask augmentation') f.DEFINE_integer('augmentation_freq_and_time_masking_number_time_masks', 3, 'number of masks in the time domain when performing freqtime-mask augmentation') f.DEFINE_float('augmentation_speed_up_std', 0, 'std for speeding-up tempo. 
If std is 0, this augmentation is not performed')
    f.DEFINE_boolean('augmentation_pitch_and_tempo_scaling', False, 'whether to use spectrogram speed and tempo scaling')
    f.DEFINE_float('augmentation_pitch_and_tempo_scaling_min_pitch', 0.95, 'min value of pitch scaling')
    f.DEFINE_float('augmentation_pitch_and_tempo_scaling_max_pitch', 1.2, 'max value of pitch scaling')
    f.DEFINE_float('augmentation_pitch_and_tempo_scaling_max_tempo', 1.2, 'max value of tempo scaling')

    # Global Constants
    # ================

    f.DEFINE_integer('num_iterations', 10000, 'number of optimization steps')
    f.DEFINE_integer('num_steps', 1, 'number of steps for each client training')
    f.DEFINE_integer('epochs', 1, 'number of epochs for training')
    f.DEFINE_integer('eval_num_iters', 1, 'how many iterations for each evaluation')
    f.DEFINE_float('dropout_rate', 0.05, 'dropout rate for feedforward layers')
    f.DEFINE_float('dropout_rate2', -1.0, 'dropout rate for layer 2 - defaults to dropout_rate')
    f.DEFINE_float('dropout_rate3', -1.0, 'dropout rate for layer 3 - defaults to dropout_rate')
    f.DEFINE_float('dropout_rate4', 0.0, 'dropout rate for layer 4 - defaults to 0.0')
    f.DEFINE_float('dropout_rate5', 0.0, 'dropout rate for layer 5 - defaults to 0.0')
    f.DEFINE_float('dropout_rate6', -1.0, 'dropout rate for layer 6 - defaults to dropout_rate')
    f.DEFINE_float('relu_clip', 20.0, 'ReLU clipping value for non-recurrent layers')
    f.DEFINE_boolean('relu_mask', False, 'Apply ReLU mask')
    f.DEFINE_string('optimizer', 'sgd', 'Optimizer (adam, sgd)')
    f.DEFINE_float('beta1', 0.9, 'beta 1 parameter of Adam optimizer')
    f.DEFINE_float('beta2', 0.999, 'beta 2 parameter of Adam optimizer')
    f.DEFINE_float('epsilon', 1e-8, 'epsilon parameter of Adam optimizer')
    f.DEFINE_float('learning_rate', 0.001, 'learning rate of Adam optimizer')
    f.DEFINE_float('model_learning_rate', 1, 'learning rate of the model')
    f.DEFINE_float('y_learning_rate', 0.001, 'learning rate of Adam optimizer')
    f.DEFINE_float('min_step', 0.001, 'min step')
    f.DEFINE_boolean('exit_if_learning_rate_unchanged', False, 'if learning rate reaches minimum value and loss does not decrease, end training')
    f.DEFINE_float('min_mae', 0., 'Training stops if MAE goes below this threshold')

    # Batch sizes
    f.DEFINE_integer('microbatch_size', 1, 'number of elements in a microbatch (FL setting)')
    f.DEFINE_integer('train_batch_size', 1, 'number of elements in a training batch')
    f.DEFINE_integer('dev_batch_size', 1, 'number of elements in a validation batch')
    f.DEFINE_integer('test_batch_size', 1, 'number of elements in a test batch')
    f.DEFINE_boolean('export_sample_only', False, 'do not export gradients')
    f.DEFINE_boolean('export_dropout_mask', False, 'export dropout mask')
    f.DEFINE_integer('export_batch_size', 1, 'number of elements per batch on the exported graph')

    # Performance
    f.DEFINE_integer('inter_op_parallelism_threads', 0, 'number of inter-op parallelism threads - see tf.ConfigProto for more details. USE OF THIS FLAG IS UNSUPPORTED')
    f.DEFINE_integer('intra_op_parallelism_threads', 0, 'number of intra-op parallelism threads - see tf.ConfigProto for more details. 
USE OF THIS FLAG IS UNSUPPORTED') f.DEFINE_boolean('use_allow_growth', False, 'use Allow Growth flag which will allocate only required amount of GPU memory and prevent full allocation of available GPU memory') f.DEFINE_boolean('load_cudnn', False, 'Specifying this flag allows one to convert a CuDNN RNN checkpoint to a checkpoint capable of running on a CPU graph.') f.DEFINE_boolean('train_cudnn', False, 'use CuDNN RNN backend for training on GPU. Note that checkpoints created with this flag can only be used with CuDNN RNN, i.e. fine tuning on a CPU device will not work') f.DEFINE_boolean('automatic_mixed_precision', False, 'whether to allow automatic mixed precision training. USE OF THIS FLAG IS UNSUPPORTED. Checkpoints created with automatic mixed precision training will not be usable without mixed precision.') # Sample limits f.DEFINE_integer('limit_train', 0, 'maximum number of elements to use from train set - 0 means no limit') f.DEFINE_integer('limit_dev', 0, 'maximum number of elements to use from validation set- 0 means no limit') f.DEFINE_integer('limit_test', 0, 'maximum number of elements to use from test set- 0 means no limit') # Checkpointing f.DEFINE_string('checkpoint_dir', '', 'directory from which checkpoints are loaded and to which they are saved - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') f.DEFINE_string('load_checkpoint_dir', '', 'directory in which checkpoints are stored - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') f.DEFINE_string('save_checkpoint_dir', '', 'directory to which checkpoints are saved - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') f.DEFINE_integer('checkpoint_secs', 600, 'checkpoint saving interval in seconds') f.DEFINE_integer('max_to_keep', 5, 'number of checkpoint files to keep - default value is 5') f.DEFINE_string('load_train', 'auto', 'what checkpoint to load before starting the training process. "last" for loading most recent epoch checkpoint, "best" for loading best validation loss checkpoint, "init" for initializing a new checkpoint, "auto" for trying several options.') f.DEFINE_string('load_evaluate', 'auto', 'what checkpoint to load for evaluation tasks (test epochs, model export, single file inference, etc). 
"last" for loading most recent epoch checkpoint, "best" for loading best validation loss checkpoint, "auto" for trying several options.') f.DEFINE_integer('checkpoint_iterations', 1000, 'checkpoint saving interval in number of iterations') f.DEFINE_string('output_path', '', 'output path') f.DEFINE_string('input_path', '', 'input path') # Transfer Learning f.DEFINE_integer('drop_source_layers', 0, 'single integer for how many layers to drop from source model (to drop just output == 1, drop penultimate and output ==2, etc)') # Exporting f.DEFINE_string('export_dir', '', 'directory in which exported models are stored - if omitted, the model won\'t get exported') f.DEFINE_boolean('remove_export', False, 'whether to remove old exported models') f.DEFINE_boolean('export_tflite', False, 'export a graph ready for TF Lite engine') f.DEFINE_integer('n_steps', 16, 'how many timesteps to process at once by the export graph, higher values mean more latency') f.DEFINE_boolean('export_zip', False, 'export a TFLite model and package with LM and info.json') f.DEFINE_string('export_file_name', 'output_graph', 'name for the exported model file name') f.DEFINE_integer('export_beam_width', 500, 'default beam width to embed into exported graph') # Model metadata f.DEFINE_string('export_author_id', 'author', 'author of the exported model. GitHub user or organization name used to uniquely identify the author of this model') f.DEFINE_string('export_model_name', 'model', 'name of the exported model. Must not contain forward slashes.') f.DEFINE_string('export_model_version', '0.0.1', 'semantic version of the exported model. See https://semver.org/. This is fully controlled by you as author of the model and has no required connection with DeepSpeech versions') def str_val_equals_help(name, val_desc): f.DEFINE_string(name, '<{}>'.format(val_desc), val_desc) str_val_equals_help('export_contact_info', 'public contact information of the author. Can be an email address, or a link to a contact form, issue tracker, or discussion forum. Must provide a way to reach the model authors') str_val_equals_help('export_license', 'SPDX identifier of the license of the exported model. See https://spdx.org/licenses/. If the license does not have an SPDX identifier, use the license name.') str_val_equals_help('export_language', 'language the model was trained on - IETF BCP 47 language tag including at least language, script and region subtags. E.g. "en-Latn-UK" or "de-Latn-DE" or "cmn-Hans-CN". Include as much info as you can without loss of precision. For example, if a model is trained on Scottish English, include the variant subtag: "en-Latn-GB-Scotland".') str_val_equals_help('export_min_ds_version', 'minimum DeepSpeech version (inclusive) the exported model is compatible with') str_val_equals_help('export_max_ds_version', 'maximum DeepSpeech version (inclusive) the exported model is compatible with') str_val_equals_help('export_description', 'Freeform description of the model being exported. Markdown accepted. You can also leave this flag unchanged and edit the generated .md file directly. 
Useful things to describe are demographic and acoustic characteristics of the data used to train the model, any architectural changes, names of public datasets that were used when applicable, hyperparameters used for training, evaluation results on standard benchmark datasets, etc.') # Reporting f.DEFINE_integer('log_level', 1, 'log level for console logs - 0: DEBUG, 1: INFO, 2: WARN, 3: ERROR') f.DEFINE_boolean('show_progressbar', True, 'Show progress for training, validation and testing processes. Log level should be > 0.') f.DEFINE_boolean('log_placement', False, 'whether to log device placement of the operators to the console') f.DEFINE_integer('report_count', 5, 'number of phrases for each of best WER, median WER and worst WER to print out during a WER report') f.DEFINE_string('summary_dir', '', 'target directory for TensorBoard summaries - defaults to directory "deepspeech/summaries" within user\'s data home specified by the XDG Base Directory Specification') f.DEFINE_boolean('summary_frames', False, 'add an entry to the TensorBoard for every frame') f.DEFINE_boolean('summary_coefficients', False, 'add an entry to the TensorBoard for every coefficients') f.DEFINE_string('test_output_file', '', 'path to a file to save all src/decoded/distance/loss tuples generated during a test epoch') f.DEFINE_boolean('debug', False, 'debug mode') # Geometry f.DEFINE_integer('n_hidden', 2048, 'layer width to use when initialising layers') # Initialization f.DEFINE_integer('random_seed', 4568, 'default random seed that is used to initialize variables') f.DEFINE_float('inp_length_mul', 1, '') f.DEFINE_integer('inp_length_add', 0, '') f.DEFINE_string('init_x', 'uniform', 'initialized values for reconstructed signal') f.DEFINE_list('init_xs', [], 'initialized values for reconstructed signal (used in eval for multiple utts)') f.DEFINE_float('init_x_noise', 0.5, 'noise level') f.DEFINE_string('init_y', 'perfect', 'initialized values for reconstructed transcript') # Early Stopping f.DEFINE_boolean('early_stop', False, 'Enable early stopping mechanism over validation dataset. If validation is not being run, early stopping is disabled.') f.DEFINE_integer('es_epochs', 25, 'Number of epochs with no improvement after which training will be stopped. Loss is not stored in the checkpoint so when checkpoint is revived it starts the loss calculation from start at that point') f.DEFINE_float('es_min_delta', 0.05, 'Minimum change in loss to qualify as an improvement. This value will also be used in Reduce learning rate on plateau') f.DEFINE_float('es_min_delta_percent', 0.05, 'Minimum change in loss to qualify as an improvement. This value will also be used in Reduce learning rate on plateau') # Reduce learning rate on plateau f.DEFINE_boolean('reduce_lr_on_plateau', False, 'Enable reducing the learning rate if a plateau is reached. This is the case if the validation loss did not improve for some epochs.') f.DEFINE_boolean('reduce_lr_on_num_sum_vectors_applied_reduced', False, 'Enable reducing the learning rate if less than some of sum vectors are applied') f.DEFINE_boolean('reduce_lr_on_num_unit_vectors_applied_reduced', False, 'Enable reducing the learning rate if less than some of unit vectors are applied') f.DEFINE_float('reduce_lr_on_num_unit_vectors_applied_reduced_rate', 0.1, 'rate') f.DEFINE_integer('plateau_epochs', 10, 'Number of epochs to consider for RLROP. 
Has to be smaller than es_epochs from early stopping')
    f.DEFINE_integer('sample_plateau_epochs', 1000000, '')
    f.DEFINE_float('plateau_reduction', 0.1, 'Multiplicative factor to apply to the current learning rate if a plateau has occurred.')
    f.DEFINE_boolean('force_initialize_learning_rate', False, 'Force re-initialization of learning rate which was previously reduced.')

    # Decoder
    f.DEFINE_boolean('utf8', False, 'enable UTF-8 mode. When this is used the model outputs UTF-8 sequences directly rather than using an alphabet mapping.')
    f.DEFINE_string('alphabet_config_path', 'data/alphabet.txt', 'path to the configuration file specifying the alphabet used by the network. See the comment in data/alphabet.txt for a description of the format.')
    f.DEFINE_string('scorer_path', 'data/lm/kenlm.scorer', 'path to the external scorer file created with data/lm/generate_package.py')
    f.DEFINE_alias('scorer', 'scorer_path')
    f.DEFINE_integer('beam_width', 1024, 'beam width used in the CTC decoder when building candidate transcriptions')
    f.DEFINE_float('lm_alpha', 0.931289039105002, 'the alpha hyperparameter of the CTC decoder. Language Model weight.')
    f.DEFINE_float('lm_beta', 1.1834137581510284, 'the beta hyperparameter of the CTC decoder. Word insertion weight.')
    f.DEFINE_float('cutoff_prob', 1.0, 'only consider characters until this probability mass is reached. 1.0 = disabled.')
    f.DEFINE_integer('cutoff_top_n', 300, 'only process this number of characters sorted by probability mass for each time step. If bigger than alphabet size, disabled.')

    # Inference mode
    f.DEFINE_string('one_shot_infer', '', 'one-shot inference mode: specify a wav file and the script will load the checkpoint and perform inference on it.')

    # Optimizer mode
    f.DEFINE_float('lm_alpha_max', 5, 'the maximum of the alpha hyperparameter of the CTC decoder explored during hyperparameter optimization. Language Model weight.')
    f.DEFINE_float('lm_beta_max', 5, 'the maximum beta hyperparameter of the CTC decoder explored during hyperparameter optimization. Word insertion weight.')
    f.DEFINE_integer('n_trials', 2400, 'the number of trials to run during hyperparameter optimization.')

    # Signal reconstruction
    f.DEFINE_list('optimized_layers', ['layer_6/bias', 'layer_6/weights'], 'Names of layers whose gradients are computed and compared with client gradients')
    f.DEFINE_integer('num_reconstructed_frames', 0, 'Number of frames to reconstruct while keeping other frames. 0 to reconstruct the full signal')
    f.DEFINE_string('reconstructed_pos', 'all', 'Position for partial reconstruction (start, end, random)')
    f.DEFINE_string('reconstruct_input', 'mfccs', 'Reconstruct audio, not mfccs')
    f.DEFINE_integer('pad_blank', 0, 'Pad mfccs with blank')

    # Gradient estimation
    f.DEFINE_integer('utt_to_reconstruct', -1, "in batch setting, reconstruct 1 utt instead of the whole batch")
    f.DEFINE_boolean('grad_estimation_sample_unit_vectors', False, "sample unit vectors when estimating gradients (instead of using basis vectors)")
    f.DEFINE_boolean('grad_estimation_scale_unit_vectors', False, "if True, unit vectors will be scaled so that ||v||^2 = d")
    f.DEFINE_boolean('grad_estimation_sample_basis_vectors', False, "sample unit vectors from basis vectors (exactly one coordinate equal to 1)")
    f.DEFINE_boolean('grad_estimation_sample_by_frame', False, "sample some frames to optimize (only unit vectors that correspond to those frames)")
    f.DEFINE_boolean('grad_estimation_sample_by_coefficient', False, "sample by MFCC coefficient (only unit vectors that correspond to a coefficient)")
    f.DEFINE_string('grad_estimation_sample_by_coefficient_weights', '11111111111111111111111111', "length 26 string indicating the weights of coefficient sampling")
    f.DEFINE_integer('grad_estimation_sample_num_frames', 1, "number of frames to sample at each iteration")
    f.DEFINE_integer('grad_estimation_sample_size', 64, "number of unit vectors to sample")
    f.DEFINE_integer('grad_estimation_batch_size', 64, "batch size for gradient estimation")
    f.DEFINE_integer('grad_estimation_num_radii', 3, "number of radii for each unit vector. Radii are different by a factor of 2")
    f.DEFINE_boolean('check_updated_values', False, "Check if the updated x reduces the loss. 
Requires one more batch computation per iteration") f.DEFINE_boolean('apply_best_vector', False, "Apply the best vector or the sum of all unit vectors that reduce the loss.") f.DEFINE_integer('use_line_search_for_applied_vector', 0, "Try different radii with the direction of the best vector") f.DEFINE_integer('unit_vectors_keep_top_k', 10, "keep top k unit vectors that change the loss the most") f.DEFINE_float('use_top_gradients_ratio', 0.5, "only consider gradients with highest absolute values") f.DEFINE_float('gradients_dropout', 0.2, "randomly switch off some gradients in the distance formula") f.DEFINE_string('update_y_strategy', 'alternate_char', 'strategy to optimize transcript') f.DEFINE_string('update_y_transcript_list_path', 'data/transcript-random.txt', 'path to a file containing list of candidates for transcript') f.DEFINE_integer('update_y_transcript_num_samples', 10, 'number of samples for each iteration') f.DEFINE_string('reconstruct', 'x', 'reconstruct x or y or both') f.DEFINE_integer('reconstruct_both_x_y_update_ratio', 10, 'number of x update iterations vs number of y update iterations') f.DEFINE_boolean('normalize', False, 'whether to normalize the input, so the optimization parameters have a more uniform range') f.DEFINE_float('ema', 0, 'use exponential moving average') f.DEFINE_string('gradient_clipping', None, 'gradient clipping') f.DEFINE_float('gradient_clip_value', 1., 'gradient clip value') f.DEFINE_float('gradient_noise', 0., 'gradient noise') f.DEFINE_boolean('use_gradient_sign', False, 'use sign of gradients instead of their estimated values') f.DEFINE_string('gradient_distance', 'cosine', 'function to measure gradients\' similarity') # Regularization f.DEFINE_list('regularization', [], 'regularization term') f.DEFINE_list('alpha', [], 'weight of the regularization term') # Register validators for paths which require a file to be specified f.register_validator('alphabet_config_path', os.path.isfile, message='The file pointed to by --alphabet_config_path must exist and be readable.') f.register_validator('one_shot_infer', lambda value: not value or os.path.isfile(value), message='The file pointed to by --one_shot_infer must exist and be readable.') # sphinx-doc: training_ref_flags_end
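# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original flag definitions: it only
# shows how they are consumed. absl.app.run() parses sys.argv, after which
# every flag registered by create_flags() is readable from the global FLAGS
# object. Note that the validators above require --alphabet_config_path to
# point at an existing file, so the default data files must be present for
# parsing to succeed.
def _example_main(_):
    # e.g. python <this file> --train_files a.csv,b.csv --learning_rate 0.01
    print('train_files      =', FLAGS.train_files)
    print('learning_rate    =', FLAGS.learning_rate)
    print('train_batch_size =', FLAGS.train_batch_size)


if __name__ == '__main__':
    import absl.app
    create_flags()
    absl.app.run(_example_main)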
#!/usr/bin/python -tt # -*- coding: utf-8 -*- # (c) 2012, Red Hat, Inc # Written by Seth Vidal <skvidal at fedoraproject.org> # (c) 2014, Epic Games, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import traceback import os import yum import rpm import syslog import platform import tempfile import shutil from distutils.version import LooseVersion try: from yum.misc import find_unfinished_transactions, find_ts_remaining from rpmUtils.miscutils import splitFilename transaction_helpers = True except: transaction_helpers = False DOCUMENTATION = ''' --- module: yum version_added: historical short_description: Manages packages with the I(yum) package manager description: - Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager. options: name: description: - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file." required: true default: null aliases: [] list: description: - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. required: false default: null state: description: - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: false choices: [ "present", "latest", "absent" ] default: "present" enablerepo: description: - I(Repoid) of repositories to enable for the install/update operation. These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". required: false version_added: "0.9" default: null aliases: [] disablerepo: description: - I(Repoid) of repositories to disable for the install/update operation. These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". required: false version_added: "0.9" default: null aliases: [] conf_file: description: - The remote yum configuration file to use for the transaction. required: false version_added: "0.6" default: null aliases: [] disable_gpg_check: description: - Whether to disable the GPG checking of signatures of packages being installed. Has an effect only if state is I(present) or I(latest). required: false version_added: "1.2" default: "no" choices: ["yes", "no"] aliases: [] update_cache: description: - Force updating the cache. Has an effect only if state is I(present) or I(latest). 
required: false version_added: "1.9" default: "no" choices: ["yes", "no"] aliases: [] notes: [] # informational: requirements for nodes requirements: [ yum ] author: Seth Vidal ''' EXAMPLES = ''' - name: install the latest version of Apache yum: name=httpd state=latest - name: remove the Apache package yum: name=httpd state=absent - name: install the latest version of Apache from the testing repo yum: name=httpd enablerepo=testing state=present - name: install one specific version of Apache yum: name=httpd-2.2.29-1.4.amzn1 state=present - name: upgrade all packages yum: name=* state=latest - name: install the nginx rpm from a remote repo yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present - name: install nginx rpm from a local file yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present - name: install the 'Development tools' package group yum: name="@Development tools" state=present ''' def_qf = "%{name}-%{version}-%{release}.%{arch}" repoquery='/usr/bin/repoquery' if not os.path.exists(repoquery): repoquery = None yumbin='/usr/bin/yum' def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) def yum_base(conf_file=None): my = yum.YumBase() my.preconf.debuglevel=0 my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file if os.geteuid() != 0: if hasattr(my, 'setCacheDir'): my.setCacheDir() else: cachedir = yum.misc.getCacheDir() my.repos.setCacheDir(cachedir) my.conf.cache = 0 return my def install_yum_utils(module): if not module.check_mode: yum_path = module.get_bin_path('yum') if yum_path: rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) if rc == 0: this_path = module.get_bin_path('repoquery') global repoquery repoquery = this_path def po_to_nevra(po): if hasattr(po, 'ui_nevra'): return po.ui_nevra else: return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch) def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None, is_pkg=False): if en_repos is None: en_repos = [] if dis_repos is None: dis_repos = [] if not repoq: pkgs = [] try: my = yum_base(conf_file) for rid in en_repos: my.repos.enableRepo(rid) for rid in dis_repos: my.repos.disableRepo(rid) e,m,u = my.rpmdb.matchPackageNames([pkgspec]) pkgs = e + m if not pkgs: pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) except Exception, e: module.fail_json(msg="Failure talking to yum: %s" % e) return [ po_to_nevra(p) for p in pkgs ] else: cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) if not is_pkg: cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] rc2,out2,err2 = module.run_command(cmd) else: rc2,out2,err2 = (0, '', '') if rc == 0 and rc2 == 0: out += out2 return [ p for p in out.split('\n') if p.strip() ] else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) return [] def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): if en_repos is None: en_repos = [] if dis_repos is None: dis_repos = [] if not repoq: pkgs = [] try: my = yum_base(conf_file) for rid in en_repos: my.repos.enableRepo(rid) for rid in dis_repos: my.repos.disableRepo(rid) e,m,u = my.pkgSack.matchPackageNames([pkgspec]) pkgs = e + m if not pkgs: pkgs.extend(my.returnPackagesByDep(pkgspec)) except Exception, e: module.fail_json(msg="Failure talking to yum: 
%s" % e) return [ po_to_nevra(p) for p in pkgs ] else: myrepoq = list(repoq) r_cmd = ['--disablerepo', ','.join(dis_repos)] myrepoq.extend(r_cmd) r_cmd = ['--enablerepo', ','.join(en_repos)] myrepoq.extend(r_cmd) cmd = myrepoq + ["--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) if rc == 0: return [ p for p in out.split('\n') if p.strip() ] else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) return [] def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): if en_repos is None: en_repos = [] if dis_repos is None: dis_repos = [] if not repoq: retpkgs = [] pkgs = [] updates = [] try: my = yum_base(conf_file) for rid in en_repos: my.repos.enableRepo(rid) for rid in dis_repos: my.repos.disableRepo(rid) pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec) if not pkgs: e,m,u = my.pkgSack.matchPackageNames([pkgspec]) pkgs = e + m updates = my.doPackageLists(pkgnarrow='updates').updates except Exception, e: module.fail_json(msg="Failure talking to yum: %s" % e) for pkg in pkgs: if pkg in updates: retpkgs.append(pkg) return set([ po_to_nevra(p) for p in retpkgs ]) else: myrepoq = list(repoq) r_cmd = ['--disablerepo', ','.join(dis_repos)] myrepoq.extend(r_cmd) r_cmd = ['--enablerepo', ','.join(en_repos)] myrepoq.extend(r_cmd) cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) if rc == 0: return set([ p for p in out.split('\n') if p.strip() ]) else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) return [] def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): if en_repos is None: en_repos = [] if dis_repos is None: dis_repos = [] if not repoq: pkgs = [] try: my = yum_base(conf_file) for rid in en_repos: my.repos.enableRepo(rid) for rid in dis_repos: my.repos.disableRepo(rid) pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec) if not pkgs: e,m,u = my.pkgSack.matchPackageNames([req_spec]) pkgs.extend(e) pkgs.extend(m) e,m,u = my.rpmdb.matchPackageNames([req_spec]) pkgs.extend(e) pkgs.extend(m) except Exception, e: module.fail_json(msg="Failure talking to yum: %s" % e) return set([ po_to_nevra(p) for p in pkgs ]) else: myrepoq = list(repoq) r_cmd = ['--disablerepo', ','.join(dis_repos)] myrepoq.extend(r_cmd) r_cmd = ['--enablerepo', ','.join(en_repos)] myrepoq.extend(r_cmd) cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec] rc,out,err = module.run_command(cmd) cmd = myrepoq + ["--qf", qf, req_spec] rc2,out2,err2 = module.run_command(cmd) if rc == 0 and rc2 == 0: out += out2 pkgs = set([ p for p in out.split('\n') if p.strip() ]) if not pkgs: pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf) return pkgs else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) return [] def transaction_exists(pkglist): """ checks the package list to see if any packages are involved in an incomplete transaction """ conflicts = [] if not transaction_helpers: return conflicts # first, we create a list of the package 'nvreas' # so we can compare the pieces later more easily pkglist_nvreas = [] for pkg in pkglist: pkglist_nvreas.append(splitFilename(pkg)) # next, we build the list of packages that are # contained within an unfinished transaction unfinished_transactions = find_unfinished_transactions() for trans in unfinished_transactions: steps = find_ts_remaining(trans) for step in steps: # the action is install/erase/etc., but we only # care about the 
package spec contained in the step (action, step_spec) = step (n,v,r,e,a) = splitFilename(step_spec) # and see if that spec is in the list of packages # requested for installation/updating for pkg in pkglist_nvreas: # if the name and arch match, we're going to assume # this package is part of a pending transaction # the label is just for display purposes label = "%s-%s" % (n,a) if n == pkg[0] and a == pkg[4]: if label not in conflicts: conflicts.append("%s-%s" % (n,a)) break return conflicts def local_nvra(module, path): """return nvra of a local rpm passed in""" ts = rpm.TransactionSet() ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) fd = os.open(path, os.O_RDONLY) try: header = ts.hdrFromFdno(fd) finally: os.close(fd) return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME], header[rpm.RPMTAG_VERSION], header[rpm.RPMTAG_RELEASE], header[rpm.RPMTAG_ARCH]) def pkg_to_dict(pkgstr): if pkgstr.strip(): n,e,v,r,a,repo = pkgstr.split('|') else: return {'error_parsing': pkgstr} d = { 'name':n, 'arch':a, 'epoch':e, 'release':r, 'version':v, 'repo':repo, 'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a) } if repo == 'installed': d['yumstate'] = 'installed' else: d['yumstate'] = 'available' return d def repolist(module, repoq, qf="%{repoid}"): cmd = repoq + ["--qf", qf, "-a"] rc,out,err = module.run_command(cmd) ret = [] if rc == 0: ret = set([ p for p in out.split('\n') if p.strip() ]) return ret def list_stuff(module, conf_file, stuff): qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] if conf_file and os.path.exists(conf_file): repoq += ['-c', conf_file] if stuff == 'installed': return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] elif stuff == 'updates': return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] elif stuff == 'available': return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] elif stuff == 'repos': return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ] else: return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ] def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): pkgs = [] res = {} res['results'] = [] res['msg'] = '' res['rc'] = 0 res['changed'] = False tempdir = tempfile.mkdtemp() for spec in items: pkg = None # check if pkgspec is installed (if possible for idempotence) # localpkg if spec.endswith('.rpm') and '://' not in spec: # get the pkg name-v-r.arch if not os.path.exists(spec): res['msg'] += "No Package file matching '%s' found on system" % spec module.fail_json(**res) nvra = local_nvra(module, spec) # look for them in the rpmdb if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos): # if they are there, skip it continue pkg = spec # URL elif '://' in spec: pkg = spec # Check if Enterprise Linux 5 or less, as yum on those versions do not support installing via url distribution_version = get_distribution_version() distribution = platform.dist() if distribution[0] == "redhat" and LooseVersion(distribution_version) < LooseVersion("6"): package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1])) try: rsp, info = fetch_url(module, pkg) data = rsp.read() f = open(package, 'w') f.write(data) f.close() pkg = package except Exception, e: shutil.rmtree(tempdir) module.fail_json(msg="Failure 
downloading %s, %s" % (spec, e)) #groups :( elif spec.startswith('@'): # complete wild ass guess b/c it's a group pkg = spec # range requires or file-requires or pkgname :( else: # most common case is the pkg is already installed and done # short circuit all the bs - and search for it as a pkg in is_installed # if you find it then we're done if not set(['*','?']).intersection(set(spec)): installed_pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) if installed_pkgs: res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec)) continue # look up what pkgs provide this pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) if not pkglist: res['msg'] += "No Package matching '%s' found available, installed or updated" % spec module.fail_json(**res) # if any of the packages are involved in a transaction, fail now # so that we don't hang on the yum operation later conflicts = transaction_exists(pkglist) if len(conflicts) > 0: res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) module.fail_json(**res) # if any of them are installed # then nothing to do found = False for this in pkglist: if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True): found = True res['results'].append('%s providing %s is already installed' % (this, spec)) break # if the version of the pkg you have installed is not in ANY repo, but there are # other versions in the repos (both higher and lower) then the previous checks won't work. # so we check one more time. This really only works for pkgname - not for file provides or virt provides # but virt provides should be all caught in what_provides on its own. # highly irritating if not found: if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): found = True res['results'].append('package providing %s is already installed' % (spec)) if found: continue # if not - then pass in the spec as what to install # we could get here if nothing provides it but that's not # the error we're catching here pkg = spec pkgs.append(pkg) if pkgs: cmd = yum_basecmd + ['install'] + pkgs if module.check_mode: # Remove rpms downloaded for EL5 via url try: shutil.rmtree(tempdir) except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(changed=True) changed = True rc, out, err = module.run_command(cmd) if (rc == 1): for spec in items: # Fail on invalid urls: if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): err = 'Package at %s could not be installed' % spec module.fail_json(changed=False,msg=err,rc=1) if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: # avoid failing in the 'Nothing To Do' case # this may happen with an URL spec. # for an already installed group, # we get rc = 0 and 'Nothing to do' in out, not in err. 
rc = 0 err = '' out = '%s: Nothing to do' % spec changed = False res['rc'] = rc res['results'].append(out) res['msg'] += err # FIXME - if we did an install - go and check the rpmdb to see if it actually installed # look for each pkg in rpmdb # look for each pkg via obsoletes # Record change res['changed'] = changed # Remove rpms downloaded for EL5 via url try: shutil.rmtree(tempdir) except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(**res) def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): pkgs = [] res = {} res['results'] = [] res['msg'] = '' res['changed'] = False res['rc'] = 0 for pkg in items: is_group = False # group remove - this is doom on a stick if pkg.startswith('@'): is_group = True else: if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): res['results'].append('%s is not installed' % pkg) continue pkgs.append(pkg) if pkgs: # run an actual yum transaction cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: module.exit_json(changed=True) rc, out, err = module.run_command(cmd) res['rc'] = rc res['results'].append(out) res['msg'] = err # compile the results into one batch. If anything is changed # then mark changed # at the end - if we've end up failed then fail out of the rest # of the process # at this point we should check to see if the pkg is no longer present for pkg in pkgs: if not pkg.startswith('@'): # we can't sensibly check for a group being uninstalled reliably # look to see if the pkg shows up from is_installed. If it doesn't if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): res['changed'] = True else: module.fail_json(**res) if rc != 0: module.fail_json(**res) module.exit_json(**res) def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res = {} res['results'] = [] res['msg'] = '' res['changed'] = False res['rc'] = 0 for spec in items: pkg = None basecmd = 'update' cmd = '' # groups, again if spec.startswith('@'): pkg = spec elif spec == '*': #update all # use check-update to see if there is any need rc,out,err = module.run_command(yum_basecmd + ['check-update']) if rc == 100: cmd = yum_basecmd + [basecmd] else: res['results'].append('All packages up to date') continue # dep/pkgname - find it else: if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): basecmd = 'update' else: basecmd = 'install' pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) if not pkglist: res['msg'] += "No Package matching '%s' found available, installed or updated" % spec module.fail_json(**res) nothing_to_do = True for this in pkglist: if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break if nothing_to_do: res['results'].append("All packages providing %s are up to date" % spec) continue # if any of the packages are involved in a transaction, fail now # so that we don't hang on the yum operation later conflicts = transaction_exists(pkglist) if len(conflicts) > 0: res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) module.fail_json(**res) pkg = spec if not cmd: cmd = yum_basecmd + [basecmd, pkg] if module.check_mode: return 
module.exit_json(changed=True) rc, out, err = module.run_command(cmd) res['rc'] += rc res['results'].append(out) res['msg'] += err # FIXME if it is - update it and check to see if it applied # check to see if there is no longer an update available for the pkgspec if rc: res['failed'] = True else: res['changed'] = True module.exit_json(**res) def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, disable_gpg_check): # take multiple args comma separated items = pkgspec.split(',') # need debug level 2 to get 'Nothing to do' for groupinstall. yum_basecmd = [yumbin, '-d', '2', '-y'] if not repoquery: repoq = None else: repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] if conf_file and os.path.exists(conf_file): yum_basecmd += ['-c', conf_file] if repoq: repoq += ['-c', conf_file] dis_repos =[] en_repos = [] if disablerepo: dis_repos = disablerepo.split(',') r_cmd = ['--disablerepo=%s' % disablerepo] yum_basecmd.extend(r_cmd) if enablerepo: en_repos = enablerepo.split(',') r_cmd = ['--enablerepo=%s' % enablerepo] yum_basecmd.extend(r_cmd) if state in ['installed', 'present', 'latest']: if module.params.get('update_cache'): module.run_command(yum_basecmd + ['makecache']) my = yum_base(conf_file) try: if disablerepo: my.repos.disableRepo(disablerepo) current_repos = my.repos.repos.keys() if enablerepo: try: my.repos.enableRepo(enablerepo) new_repos = my.repos.repos.keys() for i in new_repos: if not i in current_repos: rid = my.repos.getRepo(i) a = rid.repoXML.repoid current_repos = new_repos except yum.Errors.YumBaseError, e: module.fail_json(msg="Error setting/accessing repos: %s" % (e)) except yum.Errors.YumBaseError, e: module.fail_json(msg="Error accessing repos: %s" % e) if state in ['installed', 'present']: if disable_gpg_check: yum_basecmd.append('--nogpgcheck') install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) elif state in ['removed', 'absent']: remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) elif state == 'latest': if disable_gpg_check: yum_basecmd.append('--nogpgcheck') latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) # should be caught by AnsibleModule argument_spec return dict(changed=False, failed=True, results='', errors='unexpected state') def main(): # state=installed name=pkgspec # state=removed name=pkgspec # state=latest name=pkgspec # # informational commands: # list=installed # list=updates # list=available # list=repos # list=pkgspec module = AnsibleModule( argument_spec = dict( name=dict(aliases=['pkg']), # removed==absent, installed==present, these are accepted as aliases state=dict(default='installed', choices=['absent','present','installed','removed','latest']), enablerepo=dict(), disablerepo=dict(), list=dict(), conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), update_cache=dict(required=False, default="no", type='bool'), # this should not be needed, but exists as a failsafe install_repoquery=dict(required=False, default="yes", type='bool'), ), required_one_of = [['name','list']], mutually_exclusive = [['name','list']], supports_check_mode = True ) # this should not be needed, but exists as a failsafe params = module.params if params['install_repoquery'] and not repoquery and not module.check_mode: install_yum_utils(module) if params['list']: if not repoquery: module.fail_json(msg="repoquery is required to use list= with this module. 
Please install the yum-utils package.")
        results = dict(results=list_stuff(module, params['conf_file'], params['list']))
        module.exit_json(**results)

    else:
        pkg = params['name']
        state = params['state']
        enablerepo = params.get('enablerepo', '')
        disablerepo = params.get('disablerepo', '')
        disable_gpg_check = params['disable_gpg_check']
        res = ensure(module, state, pkg, params['conf_file'], enablerepo,
                     disablerepo, disable_gpg_check)
        module.fail_json(msg="we should never get here unless this all failed", **res)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
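# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above).  The install/remove/
# latest paths rely on helpers such as is_installed()/what_provides() that are
# defined earlier in the module and not shown in this excerpt.  As a hedged
# approximation of the idea only, a repoquery-based installed-check could look
# roughly like this; the real helper's signature and behaviour may differ.
def _is_installed_sketch(module, repoq, pkg):
    # repoq is the base repoquery command list built in ensure(), e.g.
    # ['repoquery', '--show-duplicates', '--plugins', '--quiet', '-q']
    if not repoq:
        return False
    rc, out, err = module.run_command(repoq + ['--installed', '--qf', '%{name}', pkg])
    return rc == 0 and bool(out.strip())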
import os import json try: from importlib import reload as reload_module except ImportError: from imp import reload as reload_module try: from urllib.request import urlopen from http.client import HTTPMessage except ImportError: from urllib import urlopen from django.test import TestCase from django.test.utils import override_settings from django.contrib.gis.geos import Point from django.utils.html import escapejs from django.conf import settings as test_app_settings from django import forms as django_forms from mapwidgets import widgets as mw_widgets from .utils import html_escape, get_textarea_html GOOGLE_MAP_API_KEY = os.environ.get("TEST_GOOGLE_MAP_API_KEY", test_app_settings.GOOGLE_MAP_API_KEY) DJANGO_DEFAULT_SRID_VALUE = 4326 GOOGLE_MAP_DEFAULT_SRID_VALUE = 4326 class GooglePointWidgetUnitTests(TestCase): def test_widget_with_default_settings(self): """ Test the widget with default settings which is defined in django settings file """ zoom = 15 default_map_center = [51.5073509, -0.12775829999] widget_settings = { "GooglePointFieldWidget": ( ("zoom", zoom), ("mapCenterLocation", default_map_center), ) } with override_settings(MAP_WIDGETS=widget_settings): reload_module(mw_widgets) widget = mw_widgets.GooglePointFieldWidget() self.assertEqual(hasattr(widget, "settings"), True) self.assertEqual(hasattr(widget, "settings_namespace"), True) self.assertEqual(isinstance(widget.media, django_forms.Media), True) # test `map_options` method options_str = widget.map_options() options = json.loads(options_str) self.assertEqual(options.get("zoom"), zoom) self.assertEqual(options.get("mapCenterLocation"), default_map_center) # test render with Point object value point = Point(-104.9903, 39.7392, srid=DJANGO_DEFAULT_SRID_VALUE) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) self.assertIn(widget.serialize(point), result) self.assertIn(get_textarea_html(widget_html_elem_id, widget_html_elem_name, point), result) self.assertIn(escapejs(options_str), result) # test render with serialized data value result = widget.render(name=widget_html_elem_name, value=widget.serialize(point)) self.assertIn(widget.serialize(point), result) # test widget `attrs` param w = mw_widgets.GooglePointFieldWidget(attrs={"max-height": 600}) self.assertIn("max-height", w.attrs) # test widget render `attrs` param with `None` value self.assertIn(widget_html_elem_name, w.render(name=widget_html_elem_name, value=None, attrs=None)) def test_widget_with_custom_settings(self): """ Test the widget with custom settings which is updated by `settings` parameter """ zoom = 11 default_map_center = [52.5073509, -0.23775829999] widget_settings = { "GooglePointFieldWidget": ( ("zoom", zoom), ("mapCenterLocation", default_map_center), ) } widget = mw_widgets.GooglePointFieldWidget(settings=widget_settings) self.assertEqual(hasattr(widget, "settings"), True) self.assertEqual(hasattr(widget, "settings_namespace"), True) self.assertEqual(isinstance(widget.media, django_forms.Media), True) # test `map_options` method options_str = widget.map_options() options = json.loads(options_str) self.assertEqual(options.get("zoom"), zoom) self.assertEqual(options.get("mapCenterLocation"), default_map_center) # test render with Point object value point = Point(-105.9903, 38.73922, srid=DJANGO_DEFAULT_SRID_VALUE) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, 
value=point, attrs={'id': widget_html_elem_id}) self.assertIn(widget.serialize(point), result) self.assertIn(get_textarea_html(widget_html_elem_id, widget_html_elem_name, point), result) self.assertIn(escapejs(options_str), result) # test render with serialized data value result = widget.render(name=widget_html_elem_name, value=widget.serialize(point)) self.assertIn(widget.serialize(point), result) def test_widget_with_different_srid(self): """ Test the widget with a different `srid` value instead of Geo Django default """ point = Point(-16351.8201902, 6708983.38973, srid=3857) widget_html_elem_id = "id_location" widget_html_elem_name = "location" widget = mw_widgets.GooglePointFieldWidget(map_srid=3857) result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) ogr = point.ogr ogr.transform(GOOGLE_MAP_DEFAULT_SRID_VALUE) point_with_google_map_srid_format = ogr self.assertIn(widget.serialize(point_with_google_map_srid_format), result) class GooglePointInlineWidgetUnitTests(TestCase): def test_widget_with_default_settings(self): """ Test widget with default settings which is defined in django settings file """ zoom = 15 default_map_center = [51.5073509, -0.12775829999] widget_settings = { "GooglePointFieldWidget": ( ("zoom", zoom), ("mapCenterLocation", default_map_center), ) } with override_settings(MAP_WIDGETS=widget_settings): reload_module(mw_widgets) widget = mw_widgets.GooglePointFieldInlineWidget() self.assertEqual(hasattr(widget, "settings"), True) self.assertEqual(hasattr(widget, "settings_namespace"), True) self.assertEqual(isinstance(widget.media, django_forms.Media), True) # test `map_options` method options_str = widget.map_options() options = json.loads(options_str) self.assertEqual(options.get("zoom"), zoom) self.assertEqual(options.get("mapCenterLocation"), default_map_center) # test render with Point object value point = Point(-104.9903, 39.73922, srid=DJANGO_DEFAULT_SRID_VALUE) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) self.assertIn(widget.serialize(point), result) self.assertIn(get_textarea_html(widget_html_elem_id, widget_html_elem_name, point), result) # test render with serialized data value result = widget.render(name=widget_html_elem_name, value=widget.serialize(point)) self.assertIn(widget.serialize(point), result) # test widget as a formset empty form result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) self.assertIn(widget.serialize(point), result) inline_widget_data = widget.get_js_widget_data(widget_html_elem_name, widget_html_elem_id) self.assertIn(escapejs(json.dumps(inline_widget_data)), result) # test widget `attrs` param w = mw_widgets.GooglePointFieldInlineWidget(attrs={"max-height": 600}) self.assertIn("max-height", w.attrs) # test widget render `attrs` param with `None` value self.assertIn(widget_html_elem_name, w.render(name=widget_html_elem_name, value=None, attrs=None)) def test_widget_with_custom_settings(self): """ Test the widget with custom settings which is updated by `settings` parameter """ zoom = 11 default_map_center = [52.5073509, -0.23775829999] widget_settings = { "GooglePointFieldWidget": ( ("zoom", zoom), ("mapCenterLocation", default_map_center), ) } widget = mw_widgets.GooglePointFieldInlineWidget(settings=widget_settings) self.assertEqual(hasattr(widget, "settings"), True) self.assertEqual(hasattr(widget, 
"settings_namespace"), True) self.assertEqual(isinstance(widget.media, django_forms.Media), True) # test `map_options` method options_str = widget.map_options() options = json.loads(options_str) self.assertEqual(options.get("zoom"), zoom) self.assertEqual(options.get("mapCenterLocation"), default_map_center) # test render with Point object value point = Point(-105.9903, 38.73922, srid=DJANGO_DEFAULT_SRID_VALUE) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) self.assertIn(widget.serialize(point), result) self.assertIn(get_textarea_html(widget_html_elem_id, widget_html_elem_name, point), result) # test render with serialized data value result = widget.render(name=widget_html_elem_name, value=widget.serialize(point)) self.assertIn(widget.serialize(point), result) # test widget as a formset empty form widget_html_elem_id = "__prefix__id_location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) self.assertIn(widget.serialize(point), result) inline_widget_data = widget.get_js_widget_data(widget_html_elem_name, widget_html_elem_id) self.assertIn(escapejs(json.dumps(inline_widget_data)), result) class GoogleStaticMapWidgetUnitTests(TestCase): def test_widget_with_default_settings(self): """ Test the widget with default settings which is defined in django settings file """ zoom = 13 map_size = "200x200" widget_settings = { "GoogleStaticMapWidget": ( ("zoom", zoom), ("size", map_size), ), "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY, } with override_settings(MAP_WIDGETS=widget_settings): reload_module(mw_widgets) widget = mw_widgets.GoogleStaticMapWidget() settings = widget.map_settings # test `map_settings` method self.assertEqual(settings.get("zoom"), zoom) self.assertEqual(settings.get("size"), map_size) # test render point = Point(-105.9903, 38.7392) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) map_image_url = widget.get_image_url(point) self.assertIn(GOOGLE_MAP_API_KEY, map_image_url) self.assertIn(html_escape(map_image_url), result) # test map_image_url res = urlopen(map_image_url) self.assertEqual(res.getcode(), 200) if hasattr(res.info(), 'type'): self.assertEqual(res.info().type, "image/png") else: self.assertEqual(res.info().get_content_type(), "image/png") # test map_image_url with `None` value result = widget.render(name=widget_html_elem_name, value=None, attrs={'id': widget_html_elem_id}) map_image_url = widget.get_image_url(None) self.assertIn(map_image_url, result) def test_widget_with_custom_settings(self): """ Test the widget with custom settings which is updated by `settings` parameter """ zoom = 9 map_size = "100x100" widget_settings = { "GoogleStaticMapWidget": ( ("zoom", zoom), ("size", map_size), ), "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY, } with override_settings(MAP_WIDGETS=widget_settings): reload_module(mw_widgets) widget = mw_widgets.GoogleStaticMapWidget(zoom=zoom, size=map_size) settings = widget.map_settings # test `map_settings` method self.assertEqual(settings.get("zoom"), zoom) self.assertEqual(settings.get("size"), map_size) # test render point = Point(-105.9903, 38.7392) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) map_image_url = 
widget.get_image_url(point) self.assertIn(GOOGLE_MAP_API_KEY, map_image_url) self.assertIn(html_escape(map_image_url), result) # test map_image_url res = urlopen(map_image_url) self.assertEqual(res.getcode(), 200) if hasattr(res.info(), 'type'): self.assertEqual(res.info().type, "image/png") else: self.assertEqual(res.info().get_content_type(), "image/png") class GoogleStaticOverlayMapWidgetUnitTests(TestCase): def test_widget_with_default_settings(self): """ Test the widget with default settings which is defined in django settings file """ zoom = 18 map_size = "400x400" thumbnail_size = "100x100" widget_settings = { "GoogleStaticOverlayMapWidget": ( ("zoom", zoom), ("size", map_size), ("thumbnail_size", thumbnail_size), ), "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY, } with override_settings(MAP_WIDGETS=widget_settings): reload_module(mw_widgets) widget = mw_widgets.GoogleStaticOverlayMapWidget() settings = widget.map_settings # test `map_settings` method self.assertEqual(settings.get("zoom"), zoom) self.assertEqual(settings.get("size"), map_size) self.assertEqual(settings.get("thumbnail_size"), thumbnail_size) # test render point = Point(-92.9903, 34.7392) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) map_image_url = widget.get_image_url(point) self.assertIn(GOOGLE_MAP_API_KEY, map_image_url) self.assertIn(html_escape(map_image_url), result) # test map_image_url res = urlopen(map_image_url) self.assertEqual(res.getcode(), 200) if hasattr(res.info(), 'type'): self.assertEqual(res.info().type, "image/png") else: self.assertEqual(res.info().get_content_type(), "image/png") # test thumbnail_image_url thumbnail_url = widget.get_thumbnail_url(point) res = urlopen(thumbnail_url) self.assertEqual(res.getcode(), 200) if hasattr(res.info(), 'type'): self.assertEqual(res.info().type, "image/png") else: self.assertEqual(res.info().get_content_type(), "image/png") # test map_image_url with `None` value result = widget.render(name=widget_html_elem_name, value=None, attrs={'id': widget_html_elem_id}) thumbnail_url = widget.get_thumbnail_url(None) self.assertIn(thumbnail_url, result) def test_widget_with_custom_settings(self): """ Test the widget with custom settings which is updated by `settings` parameter """ zoom = 18 map_size = "300x300" thumbnail_size = "75x75" widget_settings = { "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY, } with override_settings(MAP_WIDGETS=widget_settings): reload_module(mw_widgets) widget = mw_widgets.GoogleStaticOverlayMapWidget(zoom=zoom, size=map_size, thumbnail_size=thumbnail_size) settings = widget.map_settings # test `map_settings` method self.assertEqual(settings.get("zoom"), zoom) self.assertEqual(settings.get("size"), map_size) # test render point = Point(-105.9903, 38.7392) widget_html_elem_id = "id_location" widget_html_elem_name = "location" result = widget.render(name=widget_html_elem_name, value=point, attrs={'id': widget_html_elem_id}) map_image_url = widget.get_image_url(point) self.assertIn(GOOGLE_MAP_API_KEY, map_image_url) self.assertIn(html_escape(map_image_url), result) # test map_image_url res = urlopen(map_image_url) self.assertEqual(res.getcode(), 200) if hasattr(res.info(), 'type'): self.assertEqual(res.info().type, "image/png") else: self.assertEqual(res.info().get_content_type(), "image/png") # test thumbnail_image_url thumbnail_url = widget.get_thumbnail_url(point) res = urlopen(thumbnail_url) self.assertEqual(res.getcode(), 200) if 
hasattr(res.info(), 'type'): self.assertEqual(res.info().type, "image/png") else: self.assertEqual(res.info().get_content_type(), "image/png")
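# ---------------------------------------------------------------------------
# Illustrative note (not part of the tests above).  The override_settings(
# MAP_WIDGETS=...) blocks mirror what a project would normally declare in its
# Django settings module.  A minimal example, reusing values from the tests
# above (the API key is a placeholder):
MAP_WIDGETS_EXAMPLE = {
    "GooglePointFieldWidget": (
        ("zoom", 15),
        ("mapCenterLocation", [51.5073509, -0.12775829999]),
    ),
    "GoogleStaticMapWidget": (
        ("zoom", 13),
        ("size", "200x200"),
    ),
    "GOOGLE_MAP_API_KEY": "<your-google-maps-api-key>",
}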
""" The Plaid API The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from plaid.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) def lazy_import(): from plaid.model.external_payment_schedule_base import ExternalPaymentScheduleBase from plaid.model.payment_schedule_interval import PaymentScheduleInterval globals()['ExternalPaymentScheduleBase'] = ExternalPaymentScheduleBase globals()['PaymentScheduleInterval'] = PaymentScheduleInterval class ExternalPaymentScheduleGet(ModelComposed): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = True @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'interval': (PaymentScheduleInterval,), # noqa: E501 'interval_execution_day': (int,), # noqa: E501 'start_date': (date,), # noqa: E501 'end_date': (date, none_type,), # noqa: E501 'adjusted_start_date': (date, none_type,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'interval': 'interval', # noqa: E501 'interval_execution_day': 'interval_execution_day', # noqa: E501 'start_date': 'start_date', # noqa: E501 'end_date': 'end_date', # noqa: E501 'adjusted_start_date': 'adjusted_start_date', # noqa: E501 } required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', '_composed_instances', '_var_name_to_model_instances', '_additional_properties_model_instances', ]) @convert_js_args_to_python_args def __init__(self, interval, interval_execution_day, start_date, end_date, adjusted_start_date, *args, **kwargs): # noqa: E501 """ExternalPaymentScheduleGet - a model defined in OpenAPI Args: interval (PaymentScheduleInterval): interval_execution_day (int): The day of the interval on which to schedule the payment. 
If the payment interval is weekly, `interval_execution_day` should be an integer from 1 (Monday) to 7 (Sunday). If the payment interval is monthly, `interval_execution_day` should be an integer indicating which day of the month to make the payment on. Integers from 1 to 28 can be used to make a payment on that day of the month. Negative integers from -1 to -5 can be used to make a payment relative to the end of the month. To make a payment on the last day of the month, use -1; to make the payment on the second-to-last day, use -2, and so on. start_date (date): A date in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). Standing order payments will begin on the first `interval_execution_day` on or after the `start_date`. If the first `interval_execution_day` on or after the start date is also the same day that `/payment_initiation/payment/create` was called, the bank *may* make the first payment on that day, but it is not guaranteed to do so. end_date (date, none_type): A date in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). Standing order payments will end on the last `interval_execution_day` on or before the `end_date`. If the only `interval_execution_day` between the start date and the end date (inclusive) is also the same day that `/payment_initiation/payment/create` was called, the bank *may* make a payment on that day, but it is not guaranteed to do so. adjusted_start_date (date, none_type): The start date sent to the bank after adjusting for holidays or weekends. Will be provided in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). If the start date did not require adjustment, this field will be `null`. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) constant_args = { '_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes, } required_args = { 'interval': interval, 'interval_execution_day': interval_execution_day, 'start_date': start_date, 'end_date': end_date, 'adjusted_start_date': adjusted_start_date, } model_args = {} model_args.update(required_args) model_args.update(kwargs) composed_info = validate_get_composed_info( constant_args, model_args, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] unused_args = composed_info[3] for var_name, var_value in required_args.items(): setattr(self, var_name, var_value) for var_name, var_value in kwargs.items(): if var_name in unused_args and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ not self._additional_properties_model_instances: # discard variable. continue setattr(self, var_name, var_value) @cached_property def _composed_schemas(): # we need this here to make our import statements work # we must store _composed_schemas in here so the code is only run # when we invoke this method. If we kept this at the class # level we would get an error beause the class level # code would be run when this module is imported, and these composed # classes don't exist yet because their module has not finished # loading lazy_import() return { 'anyOf': [ ], 'allOf': [ ExternalPaymentScheduleBase, ], 'oneOf': [ ], }
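# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code above).  Instances
# of this model are normally deserialized from Plaid API responses rather than
# built by hand; the 'WEEKLY' enum value below is an assumption about
# PaymentScheduleInterval and is shown only to make the required arguments
# concrete.
def _example_external_payment_schedule():  # hypothetical helper, not generated
    from datetime import date
    from plaid.model.payment_schedule_interval import PaymentScheduleInterval
    return ExternalPaymentScheduleGet(
        interval=PaymentScheduleInterval('WEEKLY'),  # assumed enum value
        interval_execution_day=2,                    # e.g. Tuesday for a weekly interval
        start_date=date(2021, 1, 1),
        end_date=None,
        adjusted_start_date=None,
    )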
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the listtransactions API from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, COIN from io import BytesIO def txFromHex(hexstring): tx = CTransaction() f = BytesIO(hex_str_to_bytes(hexstring)) tx.deserialize(f) return tx class ListTransactionsTest(BitcoinTestFramework): def setup_nodes(self): #This test requires mocktime enable_mocktime() return start_nodes(4, self.options.tmpdir) def run_test(self): # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0}) assert_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1}) assert_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) assert_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"send"}, {"amount":Decimal("-0.2")}) assert_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"receive"}, {"amount":Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = { self.nodes[0].getnewaddress() : 0.11, self.nodes[1].getnewaddress() : 0.22, self.nodes[0].getaccountaddress("from1") : 0.33, self.nodes[1].getaccountaddress("toself") : 0.44 } txid = self.nodes[1].sendmany("", send_to) self.sync_all() assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.11")}, {"txid":txid} ) assert_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.11")}, {"txid":txid} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.22")}, {"txid":txid} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.22")}, {"txid":txid} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.33")}, {"txid":txid} ) assert_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.33")}, {"txid":txid, "account" : "from1"} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.44")}, {"txid":txid, "account" : ""} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.44")}, {"txid":txid, "account" : "toself"} ) multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0) assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True), 
{"category":"receive","amount":Decimal("0.1")}, {"txid":txid, "account" : "watchonly"} ) # rbf is disabled in 3DCoin Core # self.run_rbf_opt_in_test() # Check that the opt-in-rbf flag works properly, for sent and received # transactions. def run_rbf_opt_in_test(self): # Check whether a transaction signals opt-in RBF itself def is_opt_in(node, txid): rawtx = node.getrawtransaction(txid, 1) for x in rawtx["vin"]: if x["sequence"] < 0xfffffffe: return True return False # Find an unconfirmed output matching a certain txid def get_unconfirmed_utxo_entry(node, txid_to_match): utxo = node.listunspent(0, 0) for i in utxo: if i["txid"] == txid_to_match: return i return None # 1. Chain a few transactions that don't opt-in. txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) assert(not is_opt_in(self.nodes[0], txid_1)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) # Tx2 will build off txid_1, still not opting in to RBF. utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) # Create tx2 using createrawtransaction inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.999} tx2 = self.nodes[1].createrawtransaction(inputs, outputs) tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"] txid_2 = self.nodes[1].sendrawtransaction(tx2_signed) # ...and check the result assert(not is_opt_in(self.nodes[1], txid_2)) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) sync_mempools(self.nodes) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) # Tx3 will opt-in to RBF utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2) inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}] outputs = {self.nodes[1].getnewaddress(): 0.998} tx3 = self.nodes[0].createrawtransaction(inputs, outputs) tx3_modified = txFromHex(tx3) tx3_modified.vin[0].nSequence = 0 tx3 = bytes_to_hex_str(tx3_modified.serialize()) tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex'] txid_3 = self.nodes[0].sendrawtransaction(tx3_signed) assert(is_opt_in(self.nodes[0], txid_3)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) # Tx4 will chain off tx3. Doesn't signal itself, but depends on one # that does. 
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3) inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.997} tx4 = self.nodes[1].createrawtransaction(inputs, outputs) tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"] txid_4 = self.nodes[1].sendrawtransaction(tx4_signed) assert(not is_opt_in(self.nodes[1], txid_4)) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) sync_mempools(self.nodes) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) # Replace tx3, and check that tx4 becomes unknown tx3_b = tx3_modified tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee tx3_b = bytes_to_hex_str(tx3_b.serialize()) tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex'] txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True) assert(is_opt_in(self.nodes[0], txid_3b)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) # Check gettransaction as well: for n in self.nodes[0:2]: assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no") assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no") assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown") # After mining a transaction, it's no longer BIP125-replaceable self.nodes[0].generate(1) assert(txid_3b not in self.nodes[0].getrawmempool()) assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no") assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown") if __name__ == '__main__': ListTransactionsTest().main()
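# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above).  assert_array_result()
# comes from test_framework.util, imported above via '*'.  Roughly, it selects
# the entries of object_array whose fields match to_match and checks that each
# of them also carries the expected key/value pairs.  The sketch below is an
# approximation of that behaviour, not the framework's exact implementation.
def _assert_array_result_sketch(object_array, to_match, expected):
    matched = [item for item in object_array
               if all(item.get(k) == v for k, v in to_match.items())]
    assert matched, "no list entries matched %r" % (to_match,)
    for item in matched:
        for key, value in expected.items():
            assert item.get(key) == value, \
                "field %s was %r, expected %r" % (key, item.get(key), value)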
import networkx as nx import matplotlib.pyplot as plt import exact_controllability as ECT from networkx.utils import powerlaw_sequence import operator import random import csv import copy import subprocess, os import time import numpy as np from ControllabilityRobustnessBasedOnEdgeAttack import RandomEdgeAttack from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeDegreeAttack from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeDegreeAttack from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeBetweennessAttack from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeBetweennessAttack import strutral_controllability as SC def EdgeAttackBA(): start_time = time.time() n = 200 m = 3 fraction = 0.2 E = 591 E_rm = 118 run_cnt = 100 #******** Run Node Attack 1 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 0 for i in range(run_cnt): G1 = nx.barabasi_albert_graph(n, m, seed=rndseed) print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1 tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack1_BA.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) #******** Run Node Attack 2 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 0 for i in range(run_cnt): G1 = nx.barabasi_albert_graph(n, m, seed=rndseed) print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1 tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack2_BA.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) #******** Run Node Attack 3 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 0 for i in range(run_cnt): G1 = nx.barabasi_albert_graph(n, m, seed=rndseed) print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1 tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack3_BA.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) #******** Run Node Attack 4 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 0 for i in range(run_cnt): G1 = nx.barabasi_albert_graph(n, m, seed=rndseed) print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1 tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with 
open("results2/edge_attack4_BA.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) #******** Run Node Attack 5 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 0 for i in range(run_cnt): G1 = nx.barabasi_albert_graph(n, m, seed=rndseed) print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1 tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack5_BA.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) print "--- cost time %s seconds ---" %(time.time() - start_time) def EdgeAttackUSAir(): start_time = time.time() n = 332 fraction = 0.2 E = 2126 E_rm = int(0.2 * E) run_cnt = 100 #******** Run Edge Attack 1 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 1; for i in range(run_cnt): G1 = nx.read_pajek("dataset/USAir97.net") print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) random.seed(rndseed) ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1; tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack1_USAir.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 3 #******** Run Edge Attack 2 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G1 = nx.read_pajek("dataset/USAir97.net") print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack2_USAir.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 3 #******** Run Edge Attack 3 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G1 = nx.read_pajek("dataset/USAir97.net") print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack3_USAir.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 3 #******** Run Edge Attack 4 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G1 = nx.read_pajek("dataset/USAir97.net") print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction) 
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack4_USAir.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 3 #******** Run Edge Attack 5 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G1 = nx.read_pajek("dataset/USAir97.net") print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack5_USAir.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) print "--- cost time %s seconds ---" %(time.time() - start_time) def EdgeAttackErdosNetwork(): start_time = time.time() n = 429 fraction = 0.2 E = 1312 E_rm = int(0.2 * E) run_cnt = 30 #******** Run Node Attack 1 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) rndseed = 1 for i in range(run_cnt): G = nx.read_pajek("dataset/Erdos971_revised.net") G1 = max(nx.connected_component_subgraphs(G),key=len) print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) random.seed(rndseed) ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] rndseed += 1 tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack1_ErdosNetwork.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 1 random.seed() #******** Run Node Attack 2 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G = nx.read_pajek("dataset/Erdos971_revised.net") G1 = max(nx.connected_component_subgraphs(G),key=len) print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack2_ErdosNetwork.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 1 random.seed() #******** Run Node Attack 3 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G = nx.read_pajek("dataset/Erdos971_revised.net") G1 = max(nx.connected_component_subgraphs(G),key=len) print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack3_ErdosNetwork.csv", "w") as f: writer = 
csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 1 random.seed() #******** Run Node Attack 4 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G = nx.read_pajek("dataset/Erdos971_revised.net") G1 = max(nx.connected_component_subgraphs(G),key=len) print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack4_ErdosNetwork.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) run_cnt = 1 random.seed() #******** Run Node Attack 5 ********# tot_ND1 = [0] * (E_rm + 1) tot_T1 = [0] * (E_rm + 1) for i in range(run_cnt): G = nx.read_pajek("dataset/Erdos971_revised.net") G1 = max(nx.connected_component_subgraphs(G),key=len) print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<" print "graph info", nx.info(G1) ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction) tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)] tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1] tot_T1 = T1 tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1] tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1] with open("results2/edge_attack5_ErdosNetwork.csv", "w") as f: writer = csv.writer(f, delimiter=',') writer.writerows(zip(tot_T1, tot_ND1)) print "--- cost time %s seconds ---" %(time.time() - start_time) def ReadPajek(filename): '''Read pajek file to construct DiGraph''' G = nx.DiGraph() fp = open(filename, 'r') line = fp.readline() while line: if line[0] == '*': line = line.strip().split() label = line[0] number = int(line[1]) if label == '*Vertices' or label == '*vertices': NodeNum = number for i in range(NodeNum): NodeLine = fp.readline() NodeLine = NodeLine.strip().split() NodeID = int(NodeLine[0]) NodeLabel = NodeLine[1] G.add_node(NodeID) elif label == '*Arcs' or label == '*arcs': EdgeNum = number for i in range(EdgeNum): EdgeLine = fp.readline() EdgeLine = EdgeLine.strip().split() u = int(EdgeLine[0]) v = int(EdgeLine[1]) #w = float(EdgeLine[2]) G.add_edge(u, v) else: pass line = fp.readline() fp.close() return G def EdgeAttack(G): """ Edge attack experiments on real world networks Params: G: A directed network of networkx Returns: None. 
Print the network controllability n_D after 5% 10% 15% 20% edges removed """ NodesNum = G.number_of_nodes() EdgesNum = G.number_of_edges() # Edge remove fraction F0, F1, F2, F3, F4 F1 = 0.05 F2 = 0.10 F3 = 0.15 F4 = 0.20 LRA = [] LID = [] LRD = [] LIB = [] LRB = [] # Following is Edge Random Attack (RA) print '########## Edge RA ##########' G1 = copy.deepcopy(G) RandomEdges = copy.deepcopy(G1.edges()) random.shuffle(RandomEdges) i = 0 while i < int(F1 * EdgesNum): u, v = RandomEdges[i] G1.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0) print F1, nD LRA.append(nD) while i < int(F2 * EdgesNum): u, v = RandomEdges[i] G1.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0) print F2, nD LRA.append(nD) while i < int(F3 * EdgesNum): u, v = RandomEdges[i] G1.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0) print F3, nD LRA.append(nD) while i < int(F4 * EdgesNum): u, v = RandomEdges[i] G1.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0) print F4, nD LRA.append(nD) G1.clear() RandomEdges = [] # Following is Initial Edge Degree Attack (IDA) print '########## Edge IDA ##########' G2 = copy.deepcopy(G) NodeDegrees = nx.degree(G2) EdgeDegrees = {} for u, v in G2.edges_iter(): # Calculate the edge degrees EdgeDegrees[(u, v)] = NodeDegrees[u] * NodeDegrees[v] # Sort the edges decrendingly according to edge degree SortedEdges = sorted(EdgeDegrees, key=EdgeDegrees.get, reverse=True) i = 0 while i < int(F1 * EdgesNum): u, v = SortedEdges[i] G2.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0) print F1, nD LID.append(nD) while i < int(F2 * EdgesNum): u, v = SortedEdges[i] G2.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0) print F2, nD LID.append(nD) while i < int(F3 * EdgesNum): u, v = SortedEdges[i] G2.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0) print F3, nD LID.append(nD) while i < int(F4 * EdgesNum): u, v = SortedEdges[i] G2.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0) print F4, nD LID.append(nD) G2.clear() NodeDegrees = {} EdgeDegrees = {} SortedEdges = [] # Following is Recalculated Edge Degree Attack (RDA) print '########## Edge RDA ##########' G3 = copy.deepcopy(G) i = 0 while i < int(F1 * EdgesNum): # Find the edge with max edge degree at present MaxU = -1; MaxV = -1; MaxDegree = -1; NodeDegrees = nx.degree(G3) for (u, v) in G3.edges_iter(): CurDegree = NodeDegrees[u] * NodeDegrees[v] if CurDegree > MaxDegree: MaxDegree = CurDegree MaxU = u MaxV = v G3.remove_edge(MaxU, MaxV) i += 1 nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0) print F1, nD LRD.append(nD) while i < int(F2 * EdgesNum): # Find the edge with max edge degree at present MaxU = -1; MaxV = -1; MaxDegree = -1; NodeDegrees = nx.degree(G3) for (u, v) in G3.edges_iter(): CurDegree = NodeDegrees[u] * NodeDegrees[v] if CurDegree > MaxDegree: MaxDegree = CurDegree MaxU = u MaxV = v G3.remove_edge(MaxU, MaxV) i += 1 nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0) print F2, nD LRD.append(nD) while i < int(F3 * EdgesNum): # Find the edge with max edge degree at present MaxU = -1; MaxV = -1; MaxDegree = -1; NodeDegrees = nx.degree(G3) for (u, v) in G3.edges_iter(): CurDegree = NodeDegrees[u] * NodeDegrees[v] if CurDegree > MaxDegree: MaxDegree = CurDegree MaxU = u MaxV = v G3.remove_edge(MaxU, MaxV) i += 1 nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0) print F3, nD LRD.append(nD) while i < int(F4 * EdgesNum): # Find the edge with max 
edge degree at present MaxU = -1; MaxV = -1; MaxDegree = -1; NodeDegrees = nx.degree(G3) for (u, v) in G3.edges_iter(): CurDegree = NodeDegrees[u] * NodeDegrees[v] if CurDegree > MaxDegree: MaxDegree = CurDegree MaxU = u MaxV = v G3.remove_edge(MaxU, MaxV) i += 1 nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0) print F4, nD LRD.append(nD) G3.clear() # Folloing is Initial Edge Betweenness Attack (IBA) print '########## Edge IBA ##########' G4 = copy.deepcopy(G) EdgeBetweenness = nx.edge_betweenness_centrality(G4, k=1500) #EdgeBetweenness = nx.edge_current_flow_betweenness_centrality(G4, solver='lu') SortedBetEdges = sorted(EdgeBetweenness, key=EdgeBetweenness.get, reverse=True) i = 0 while i < int(F1 * EdgesNum): u, v = SortedBetEdges[i] G4.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0) print F1, nD LIB.append(nD) while i < int(F2 * EdgesNum): u, v = SortedBetEdges[i] G4.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0) print F2, nD LIB.append(nD) while i < int(F3 * EdgesNum): u, v = SortedBetEdges[i] G4.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0) print F3, nD LIB.append(nD) while i < int(F4 * EdgesNum): u, v = SortedBetEdges[i] G4.remove_edge(u, v) i += 1 nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0) print F4, nD LIB.append(nD) G4.clear() EdgeBetweenness = {} SortedBetEdges = [] # Following is Recalculated Edge Betweenness Attack (RBA) print '########## Edge RBA ##########' G5 = copy.deepcopy(G) i = 0 while i < int(F1 * EdgesNum): #EdgeBets = nx.edge_betweenness_centrality(G5) EdgeBets = nx.edge_betweenness_centrality(G5, k=1500) # Find the edge with Max edge betweenness uMax = -1; vMax = -1; betMax = -1.0; for ((u, v), bet) in EdgeBets.iteritems(): if bet > betMax: betMax = bet uMax = u vMax = v G5.remove_edge(uMax, vMax) i += 1 nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0) print F1, nD LRB.append(nD) while i < int(F2 * EdgesNum): #EdgeBets = nx.edge_betweenness_centrality(G5) EdgeBets = nx.edge_betweenness_centrality(G5, k=1500) # Find the edge with Max edge betweenness uMax = -1; vMax = -1; betMax = -1.0; for ((u, v), bet) in EdgeBets.iteritems(): if bet > betMax: betMax = bet uMax = u vMax = v G5.remove_edge(uMax, vMax) i += 1 nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0) print F2, nD LRB.append(nD) while i < int(F3 * EdgesNum): #EdgeBets = nx.edge_betweenness_centrality(G5) EdgeBets = nx.edge_betweenness_centrality(G5, k=1500) # Find the edge with Max edge betweenness uMax = -1; vMax = -1; betMax = -1.0; for ((u, v), bet) in EdgeBets.iteritems(): if bet > betMax: betMax = bet uMax = u vMax = v G5.remove_edge(uMax, vMax) i += 1 nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0) print F3, nD LRB.append(nD) while i < int(F4 * EdgesNum): #EdgeBets = nx.edge_betweenness_centrality(G5) EdgeBets = nx.edge_betweenness_centrality(G5, k=1500) # Find the edge with Max edge betweenness uMax = -1; vMax = -1; betMax = -1.0; for ((u, v), bet) in EdgeBets.iteritems(): if bet > betMax: betMax = bet uMax = u vMax = v G5.remove_edge(uMax, vMax) i += 1 nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0) print F4, nD LRB.append(nD) G5.clear() print 'RA: ', LRA[0], LRA[1], LRA[2], LRA[3] print 'ID: ', LID[0], LID[1], LID[2], LID[3] print 'RD: ', LRD[0], LRD[1], LRD[2], LRD[3] print 'IB: ', LIB[0], LIB[1], LIB[2], LIB[3] print 'RB: ', LRB[0], LRB[1], LRB[2], LRB[3] if __name__ == "__main__": #EdgeAttackBA() #EdgeAttackUSAir() # Edge Attack Erdos971 Network # for random attack, we set the random seed to from 1 to 
100 for the
    # independent 100 runs. For other deliberate attacks, as the attack order
    # is fixed, we reset the seed of random to the initial state, i.e. seed(None)
    #EdgeAttackErdosNetwork()

    # Regulatory
    #G = ReadPajek('./dataset/Regulatory/TRN-Yeast-1.net')
    #G = ReadPajek('./dataset/Regulatory/TRN-Yeast-2.net')
    #G = ReadPajek('./dataset/Regulatory/TRN-EC-2.net')
    #G = ReadPajek('./dataset/Regulatory/Ownership.net')

    # Trust
    G = ReadPajek('./dataset/Trust/WikiVote.net')

    # World Wide Web (WWW)
    #G = ReadPajek('./dataset/WWW/PoliticalBlogs.net')

    # Internet
    #G = ReadPajek('./dataset/Internet/P2P_1.net')
    #G = ReadPajek('./dataset/Internet/P2P_2.net')
    #G = ReadPajek('./dataset/Internet/P2P_3.net')

    # Citation
    #G = ReadPajek('./dataset/Citation/ArXivHepPh.net')
    #G = ReadPajek('./dataset/Citation/ArXivHepTh.net')

    print 'Edge Attack From Temp Temp Temp Flow Betweenness 2... '
    print 'Trust --- Wikivote.net'

    NodesNum = G.number_of_nodes()
    EdgesNum = G.number_of_edges()
    DriverNodes = SC.control_nodes(G)
    nD = len(DriverNodes) / (NodesNum + 0.0)
    print 'Nodes Num: ', NodesNum
    print 'Edges Num: ', EdgesNum
    print 'nD = ', nD

    EdgeAttack(G)
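# ---------------------------------------------------------------------------
# Illustrative helper sketch (not part of the script above).  The degree-based
# edge attacks rank an edge (u, v) by the product of its endpoint degrees,
# k_u * k_v, exactly as EdgeAttack() does inline.  A compact helper expressing
# the same ranking, written against the networkx 1.x API used in this script:
def sorted_edges_by_degree_product(G):
    deg = nx.degree(G)  # dict-like mapping: node -> degree
    return sorted(G.edges(), key=lambda e: deg[e[0]] * deg[e[1]], reverse=True)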
#!/usr/bin/env python """ This script implements branching and tagging in the DOC group repository, and automates the process of creating sets. """ import os def parse_args (): from optparse import OptionParser parser = OptionParser ("usage: %prog [options] name") parser.add_option ("-A", "--ACE", dest="project", action="store_const", help="Branch/tag only ACE", default=None, const="ace") parser.add_option ("-T", "--TAO", dest="project", action="store_const", help="Branch/tag ACE and TAO", default=None, const="tao") parser.add_option ("-C", "--CIAO", dest="project", action="store_const", help="Branch/tag ACE, TAO, and CIAO", default=None, const="ciao") parser.add_option ("-t", "--tag", dest="action", help="Create a tag", action="store_true", default=None) parser.add_option ("-b", "--branch", dest="action", action="store_false", help="Create a branch", default=None) parser.add_option ("-v", "--verbose", dest="verbose", action="store_true", help="Print out verbose debugging output", default=False) parser.add_option ("-s", "--svn", dest="svn", default="svn", help="Full path to svn binary, if not in path") parser.add_option ("-r", "--repo", dest="repo", default="https://svn.dre.vanderbilt.edu/DOC/Middleware/", help="Repository to use, defaults to s.d.v.e/DOC/Middleware.") parser.add_option ("--src", dest="source", default="trunk/", help="Path in repository from which to branch, defaults to trunk") parser.add_option ("--dest", dest="dest", default="", help="Specifies a subdirectory of branches or tags in which " + "to place the new branch/tag. dest must already exist.") parser.add_option ("-n", dest="take_action", action="store_false", default=True, help="Take no action") (opts, args) = parser.parse_args () if len(args) != 1: parser.error ("must specify exactly one branch or tag name") if opts.action is None: parser.error ("must specify either a branch or tag action") if opts.project is None: parser.error ("must specity a project to branch") return (opts, args) def execute (command): from os import system if opts.verbose: print "executing " + command if opts.take_action and os.system (command) != 0: raise Exception ("Command failed: " + command) def svn_copy (source, dest): command = " ".join ([opts.svn, "copy", '-m "branching/tagging"', source, dest]) execute (command) def svn_propset (path, prop, value): command = " ".join ([opts.svn, "propset", prop, "'" + value + "'", path]) execute (command) def svn_mkdir (path): command = " ".join ([opts.svn, "mkdir", '-m "branching/tagging"', path]) execute (command) def svn_mkdir_local (path): command = " ".join ([opts.svn, "mkdir", path]) execute (command) def get_head_revision (url): command = " ".join ([opts.svn, "info", url]) import re lineregex = re.compile ("Last Changed Rev: (\d+)") for line in os.popen (command).readlines (): match = lineregex.match (line) if (match is not None): return int(match.group (1)) print "ERROR: Unable to find current MPC head revision" raise Exception def branch_ACE (): # Perform branching destination = opts.repo + opts.dest svn_copy (opts.repo + opts.source + "/ACE", destination + "modules/ACE") # pin MPC revision # Need local copy of the ACE directory to to the propset # execute ("svn up -N " + opts.repo + path + "/modules/ACE sets_manager_temp/module_ACE") execute ("svn up -N sets_manager_temp/modules/ACE") mpc_rev = get_head_revision ("svn://svn.dre.vanderbilt.edu/DOC/MPC/trunk") svn_propset ("sets_manager_temp/modules/ACE", "svn:externals", "%s\t-r %d %s" % ("MPC", mpc_rev, 
"svn://svn.dre.vanderbilt.edu/DOC/MPC/trunk")) #Create the set svn_mkdir_local ("sets_manager_temp/sets/ACE") svn_propset ("sets_manager_temp/sets/ACE", "svn:externals", "%s\t%s" % ("ACE_wrappers", destination + "modules/ACE")) def branch_TAO (): branch_ACE () # Perform branching destination = opts.repo + opts.dest svn_copy (opts.repo + opts.source + "/TAO", destination + "modules/TAO") #Create the set svn_mkdir_local ("sets_manager_temp/sets/ACE+TAO") svn_propset ("sets_manager_temp/sets/ACE+TAO", "svn:externals", "%s\t%s\n%s\t%s" % ("ACE_wrappers", destination + "modules/ACE", "ACE_wrappers/TAO", destination + "modules/TAO")) def branch_CIAO (): branch_TAO () #Perform branching destination = opts.repo + opts.dest svn_copy (opts.repo + opts.source + "/CIAO", destination + "modules/CIAO") # Create the set svn_mkdir_local ("sets_manager_temp/sets/ACE+TAO+CIAO") svn_propset ("sets_manager_temp/sets/ACE+TAO+CIAO", "svn:externals", "%s\t%s\n%s\t%s\n%s\t%s" % ("ACE_wrappers", destination + "modules/ACE", "ACE_wrappers/TAO", destination + "modules/TAO", "ACE_wrappers/TAO/CIAO", destination + "modules/CIAO")) def main (opts, args): # Lets make opts global globals ()['opts'] = opts path = str () if opts.action: # True for tag path = "tags/" else: # Branch path = "branches/" path += "%s/%s" % (opts.dest, args[0]) # Make branch/tag directory svn_mkdir (opts.repo + path) execute ("svn co " + opts.repo + path + " sets_manager_temp") # Make modules and sets subdirectory svn_mkdir_local ("sets_manager_temp/modules") svn_mkdir_local ("sets_manager_temp/sets") # commit the new directories execute ('svn commit -m "branching/tagging" sets_manager_temp') # opts.dest should now be set to path, all of the branching # functions assume dest now points to the branch/tag in which # the copies should be places opts.dest = path + '/' {'ace': branch_ACE, 'tao': branch_TAO, 'ciao': branch_CIAO}[opts.project] () # Commit the sets directory execute ('svn commit -m "branching/tagging" sets_manager_temp') # remove the sets directory for root, dirs, files in os.walk ('sets_manager_temp', False): for name in files: os.remove (os.path.join (root, name)) for name in dirs: os.rmdir (os.path.join (root, name)) if __name__ == "__main__": opts, args = parse_args () main (opts, args)
from __future__ import print_function, division import numpy as np import pandas as pd import pytest from sklearn.utils.validation import NotFittedError from sklearn.pipeline import Pipeline from nflwin import preprocessing class TestPipelines(object): """Testing if pipelining cleaning steps works.""" def test_map_to_int_to_onehot(self): fit_df = pd.DataFrame({"quarter": ["Q1", "Q1", "Q1", "Q2", "Q2"]}) transform_df = fit_df.copy() mti = preprocessing.MapToInt("quarter", copy=True) ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["quarter"], copy=True) pipe = Pipeline(steps=[("one", mti), ("two", ohe)]) pipe.fit(fit_df) output_df = pipe.transform(transform_df) expected_df = pd.DataFrame({"onehot_col1": [1.0, 1, 1, 0, 0], "onehot_col2": [0.0, 0, 0, 1, 1]}) pd.util.testing.assert_frame_equal(output_df, expected_df) class TestComputeElapsedTime(object): """Testing if we can properly map quarters and time elapsed to a total time elapsed.""" def test_bad_quarter_colname_produces_error(self): input_df = pd.DataFrame({"blahblahblah": ["Q1", "Q2", "Q3", "Q4", "OT"], "time_elapsed": [200, 0, 50, 850, 40]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed") cet.fit(input_df) with pytest.raises(KeyError): cet.transform(input_df) def test_bad_time_elapsed_colname_produces_error(self): input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"], "blahblahblah": [200, 0, 50, 850, 40]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed") cet.fit(input_df) with pytest.raises(KeyError): cet.transform(input_df) def test_preexisting_output_colname_produces_error(self): input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"], "time_elapsed": [200, 0, 50, 850, 40], "total_time_elapsed": [0, 0, 0, 0, 0]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed", total_time_colname="total_time_elapsed") cet.fit(input_df) with pytest.raises(KeyError): cet.transform(input_df) def test_incomplete_quarter_mapping(self): input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT1"], "time_elapsed": [200, 0, 50, 850, 40]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed", quarter_to_second_mapping={ "Q1": 0, "Q2": 900, "Q4": 2700, "OT1":3600} ) cet.fit(input_df) with pytest.raises(TypeError): cet.transform(input_df) def test_simple_working_case(self): input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"], "time_elapsed": [200, 0, 50, 850, 40]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed") cet.fit(input_df) transformed_df = cet.transform(input_df) expected_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"], "time_elapsed": [200, 0, 50, 850, 40], "total_elapsed_time": [200, 900, 1850, 3550, 3640]}) pd.util.testing.assert_frame_equal(transformed_df, expected_df) def test_inplace_transform(self): input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"], "time_elapsed": [200, 0, 50, 850, 40]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed", copy=False) cet.fit(input_df) cet.transform(input_df) expected_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"], "time_elapsed": [200, 0, 50, 850, 40], "total_elapsed_time": [200, 900, 1850, 3550, 3640]}) pd.util.testing.assert_frame_equal(input_df, expected_df) def test_custom_mapping(self): input_df = pd.DataFrame({"quarter": ["quarter1", "Q2", "Q3", "Q4", "OT1"], "time_elapsed": [200, 0, 50, 850, 40]}) cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed", quarter_to_second_mapping={ 
"quarter1": 0, "Q2": 500, "Q3": 1800, "Q4": 2700, "OT1":3600}) cet.fit(input_df) transformed_df = cet.transform(input_df) expected_df = pd.DataFrame({"quarter": ["quarter1", "Q2", "Q3", "Q4", "OT1"], "time_elapsed": [200, 0, 50, 850, 40], "total_elapsed_time": [200, 500, 1850, 3550, 3640]}) pd.util.testing.assert_frame_equal(transformed_df, expected_df) class TestComputeIfOffenseIsHome(object): """Testing if we can correctly compute if the offense is the home team.""" def test_bad_offense_colname_produces_error(self): input_df = pd.DataFrame({"home_team": ["a", "a", "a"], "blahblahblah": ["a", "b", "a"]}) ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team") ciow.fit(input_df) with pytest.raises(KeyError): ciow.transform(input_df) def test_bad_home_team_colname_produces_error(self): input_df = pd.DataFrame({"blahblahblah": ["a", "a", "a"], "offense_team": ["a", "b", "a"]}) ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team") ciow.fit(input_df) with pytest.raises(KeyError): ciow.transform(input_df) def test_existing_offense_home_team_colname_produces_error(self): input_df = pd.DataFrame({"home_team": ["a", "a", "a"], "offense_team": ["a", "b", "a"]}) ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team", offense_home_team_colname="home_team") ciow.fit(input_df) with pytest.raises(KeyError): ciow.transform(input_df) def test_correct_answer_with_copy(self): input_df = pd.DataFrame({"home_team": ["a", "a", "a"], "offense_team": ["a", "b", "a"]}) expected_input_df = input_df.copy() expected_transformed_df = pd.DataFrame({"home_team": ["a", "a", "a"], "offense_team": ["a", "b", "a"], "offense_home_team": [True, False, True]}) ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team", offense_home_team_colname="offense_home_team", copy=True) transformed_df = ciow.transform(input_df) pd.util.testing.assert_frame_equal(input_df.sort_index(axis=1), expected_input_df.sort_index(axis=1)) pd.util.testing.assert_frame_equal(transformed_df.sort_index(axis=1), expected_transformed_df.sort_index(axis=1)) def test_correct_answer_without_copy(self): input_df = pd.DataFrame({"home_team": ["a", "a", "a"], "offense_team": ["a", "b", "a"]}) expected_transformed_df = pd.DataFrame({"home_team": ["a", "a", "a"], "offense_team": ["a", "b", "a"], "offense_home_team": [True, False, True]}) ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team", offense_home_team_colname="offense_home_team", copy=False) ciow.transform(input_df) pd.util.testing.assert_frame_equal(input_df.sort_index(axis=1), expected_transformed_df.sort_index(axis=1)) class TestMapToInt(object): """Testing if the integer mapper works.""" def test_fit_bad_colname_produces_error(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) mti = preprocessing.MapToInt("blahblahblah") with pytest.raises(KeyError): mti.fit(input_df) def test_mapping_without_nans(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) mti = preprocessing.MapToInt("one") mti.fit(input_df) expected_output = {"one": 0, "two": 1, "four": 2, "six": 3} assert mti.mapping == expected_output def test_mapping_with_nans(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", np.nan, "one", "one"]}) mti = preprocessing.MapToInt("one") mti.fit(input_df) expected_output = {"one": 0, "two": 1, "four": 2, "six": 3} assert mti.mapping == expected_output def test_transform_before_fit_produces_error(self): 
input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) mti = preprocessing.MapToInt("one") with pytest.raises(NotFittedError): mti.transform(input_df) def test_transform_bad_colname_produces_error(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) mti = preprocessing.MapToInt("one") mti.fit(input_df) transform_df = pd.DataFrame({"blahblahblah": ["one", "two", "one", "four", "six", "two", "one", "one"]}) with pytest.raises(KeyError): mti.transform(transform_df) def test_transform_without_nans(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) mti = preprocessing.MapToInt("one") mti.fit(input_df) transformed_df = mti.transform(input_df) expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, 0, 0]}) pd.util.testing.assert_frame_equal(transformed_df, expected_df) def test_transform_with_nans(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", np.nan, "one"]}) mti = preprocessing.MapToInt("one") mti.fit(input_df) transformed_df = mti.transform(input_df) expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, np.nan, 0]}) pd.util.testing.assert_frame_equal(transformed_df, expected_df) def test_transform_inplace(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) mti = preprocessing.MapToInt("one", copy=False) mti.fit(input_df) mti.transform(input_df) expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, 0, 0]}) pd.util.testing.assert_frame_equal(input_df, expected_df) def test_transform_copy(self): input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]}) expected_df = input_df.copy() mti = preprocessing.MapToInt("one", copy=True) mti.fit(input_df) transformed_data = mti.transform(input_df) pd.util.testing.assert_frame_equal(input_df, expected_df) class TestOneHotEncoderFromDataFrame(object): """Testing if the one-hot encoder wrapper works.""" def setup_method(self, method): self.data = pd.DataFrame({"one": [1, 2, 3, 1], "two": [2, 2, 2, 5], "three": [0, 5, 0, 5]}) self.data = self.data[["one", "two", "three"]] def test_correct_dtype_passed(self): ohe = preprocessing.OneHotEncoderFromDataFrame(dtype=np.int) assert ohe.dtype == np.int def test_correct_handle_unknown_string_passed(self): ohe = preprocessing.OneHotEncoderFromDataFrame(handle_unknown="ignore") assert ohe.handle_unknown == "ignore" def test_encode_all_columns(self): ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names="all") ohe.fit(self.data) transformed_data = ohe.transform(self.data) expected_data = pd.DataFrame({"onehot_col1": [1., 0, 0, 1], "onehot_col2": [0., 1, 0, 0], "onehot_col3": [0., 0, 1, 0], "onehot_col4": [1., 1, 1, 0], "onehot_col5": [0., 0, 0, 1], "onehot_col6": [1., 0, 1, 0], "onehot_col7": [0., 1, 0, 1]}) pd.util.testing.assert_frame_equal(transformed_data.sort_index(axis=1), expected_data.sort_index(axis=1)) def test_encode_some_columns(self): ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"]) ohe.fit(self.data) transformed_data = ohe.transform(self.data) expected_data = pd.DataFrame({"two": [2, 2, 2, 5], "onehot_col1": [1., 0, 0, 1], "onehot_col2": [0., 1, 0, 0], "onehot_col3": [0., 0, 1, 0], "onehot_col4": [1., 0, 1, 0], "onehot_col5": [0., 1, 0, 1]}) pd.util.testing.assert_frame_equal(transformed_data.sort_index(axis=1), expected_data.sort_index(axis=1)) def test_copy_data_works(self): ohe = 
preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"], copy=True) ohe.fit(self.data) transformed_data = ohe.transform(self.data) expected_data = pd.DataFrame({"one": [1, 2, 3, 1], "two": [2, 2, 2, 5], "three": [0, 5, 0, 5]}) pd.util.testing.assert_frame_equal(self.data.sort_index(axis=1), expected_data.sort_index(axis=1)) def test_inplace_transform_works(self): ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"], copy=False) data = self.data.copy() ohe.fit(self.data) ohe.transform(self.data) expected_data = pd.DataFrame({"two": [2, 2, 2, 5], "onehot_col1": [1., 0, 0, 1], "onehot_col2": [0., 1, 0, 0], "onehot_col3": [0., 0, 1, 0], "onehot_col4": [1., 0, 1, 0], "onehot_col5": [0., 1, 0, 1]}) pd.util.testing.assert_frame_equal(self.data.sort_index(axis=1), expected_data.sort_index(axis=1)) def test_encoding_subset_columns(self): ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"], copy=True) shifted_data = self.data[2:] ohe.fit(shifted_data) transformed_data = ohe.transform(shifted_data) self.data = pd.DataFrame({"one": [1, 2, 3, 1], "two": [2, 2, 2, 5], "three": [0, 5, 0, 5]}) expected_data = pd.DataFrame({"two": [2, 5], "onehot_col1": [0., 1], "onehot_col2": [1., 0], "onehot_col3": [1., 0], "onehot_col4": [0., 1]}, index=[2, 3]) print(transformed_data) print(expected_data) pd.util.testing.assert_frame_equal(transformed_data.sort_index(axis=1), expected_data.sort_index(axis=1)) class TestCreateScoreDifferential(object): """Testing if score differentials are properly created.""" def test_bad_home_score_colname(self): csd = preprocessing.CreateScoreDifferential("badcol", "away_score", "offense_home") data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) with pytest.raises(KeyError): csd.transform(data) def test_bad_away_score_colname(self): csd = preprocessing.CreateScoreDifferential("home_score", "badcol", "offense_home") data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) with pytest.raises(KeyError): csd.fit(data) csd.transform(data) def test_bad_offense_home_colname(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "badcol") data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) with pytest.raises(KeyError): csd.fit(data) csd.transform(data) def test_differential_column_already_exists(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home", score_differential_colname="used_col") data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True], "used_col": [0, 0, 0, 0]}) with pytest.raises(KeyError): csd.fit(data) csd.transform(data) def test_differential_works_offense_is_home(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home", score_differential_colname="score_diff") input_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True], "score_diff": [-9, 2, -2, -11]}) csd.fit(input_data) transformed_data = csd.transform(input_data) pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1), transformed_data.sort_index(axis=1)) 
def test_differential_works_offense_is_away(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home", score_differential_colname="score_diff") input_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [False, False, False, False]}) expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [False, False, False, False], "score_diff": [9, -2, 2, 11]}) csd.fit(input_data) transformed_data = csd.transform(input_data) pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1), transformed_data.sort_index(axis=1)) def test_differential_works_offense_is_mix(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home", score_differential_colname="score_diff") input_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, False, False]}) expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, False, False], "score_diff": [-9, 2, 2, 11]}) csd.fit(input_data) transformed_data = csd.transform(input_data) pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1), transformed_data.sort_index(axis=1)) def test_differential_with_copied_data(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home", score_differential_colname="score_diff", copy=True) input_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) expected_input_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) expected_transformed_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True], "score_diff": [-9, 2, -2, -11]}) csd.fit(input_data) transformed_data = csd.transform(input_data) pd.util.testing.assert_frame_equal(expected_input_data.sort_index(axis=1), input_data.sort_index(axis=1)) pd.util.testing.assert_frame_equal(expected_transformed_data.sort_index(axis=1), transformed_data.sort_index(axis=1)) def test_differential_with_inplace_data(self): csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home", score_differential_colname="score_diff", copy=False) input_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True]}) expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4], "away_score": [10, 0, 5, 15], "offense_home": [True, True, True, True], "score_diff": [-9, 2, -2, -11]}) csd.fit(input_data) csd.transform(input_data) pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1), input_data.sort_index(axis=1)) class TestCheckColumnNames(object): """Testing whether column names are properly checked.""" def test_transform_called_before_fit(self): ccn = preprocessing.CheckColumnNames() data = pd.DataFrame() with pytest.raises(NotFittedError): ccn.transform(data) def test_transform_data_has_wrong_columns(self): ccn = preprocessing.CheckColumnNames() input_data = pd.DataFrame({"one": [1, 2], "two": [3, 4]}) ccn.fit(input_data) test_data = pd.DataFrame({"one": [1, 2], "three": [3, 4]}) with pytest.raises(KeyError): ccn.transform(test_data) def test_transform_reorders_columns(self): ccn = preprocessing.CheckColumnNames() input_data = pd.DataFrame({"one": [1, 2], "two": [3, 4], "three": [5, 6]}) test_data = pd.DataFrame({"one": 
[7, 8], "two": [9, 10], "three": [11, 12]}) expected_data = test_data.copy() #Ensure columns are in a particular order: input_data = input_data[["one", "two", "three"]] test_data = test_data[["two", "one", "three"]] expected_data = expected_data[["one", "two", "three"]] with pytest.raises(AssertionError): pd.util.testing.assert_frame_equal(test_data, expected_data) ccn.fit(input_data) pd.util.testing.assert_frame_equal(ccn.transform(test_data), expected_data) def test_transform_drops_unnecessary_columns(self): ccn = preprocessing.CheckColumnNames() input_data = pd.DataFrame({"one": [1, 2], "two": [3, 4], "three": [5, 6]}) test_data = pd.DataFrame({"one": [7, 8], "two": [9, 10], "three": [11, 12], "four": [13, 14]}) expected_data = pd.DataFrame({"one": [7, 8], "two": [9, 10], "three": [11, 12]}) #Ensure columns are in a particular order: input_data = input_data[["one", "two", "three"]] expected_data = expected_data[["one", "two", "three"]] ccn.fit(input_data) pd.util.testing.assert_frame_equal(ccn.transform(test_data), expected_data) def test_transform_with_user_specified_colums(self): ccn = preprocessing.CheckColumnNames(column_names=["c", "b", "a"]) input_data = pd.DataFrame({"e": [-2, -1, 0], "a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9], "d": [10, 11, 12]}) expected_data = pd.DataFrame({"c": [7, 8, 9], "b": [4, 5, 6], "a": [1, 2, 3]}) expected_data = expected_data[["c", "b", "a"]] transformed_data = ccn.transform(input_data) pd.util.testing.assert_frame_equal(expected_data, transformed_data)
#!/usr/bin/env python # Copyright 2013 Brett Slatkin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements authentication for the API server and frontend.""" import datetime import functools import json import logging import time import urllib import urllib2 # Local libraries import flask from flask import abort, g, redirect, render_template, request, url_for from flask.ext.login import ( confirm_login, current_user, fresh_login_required, login_fresh, login_required, login_user, logout_user) # Local modules from . import app from . import db from . import login from dpxdt.server import forms from dpxdt.server import models from dpxdt.server import operations from dpxdt.server import utils GOOGLE_OAUTH2_AUTH_URL = 'https://accounts.google.com/o/oauth2/auth' GOOGLE_OAUTH2_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token' GOOGLE_OAUTH2_USERINFO_URL = 'https://www.googleapis.com/oauth2/v1/userinfo' GOOGLE_OAUTH2_SCOPES = 'https://www.googleapis.com/auth/userinfo.email' FETCH_TIMEOUT_SECONDS = 60 @login.user_loader def load_user(user_id): user = operations.UserOps(user_id).load() if user and user.is_authenticated(): logging.debug('Authenticated as user=%r', user.get_id()) return user @app.context_processor def auth_context(): """Adds extra default context for rendered templates.""" return dict(current_user=current_user) @app.route('/login') def login_view(): next_url = request.args.get('next', default='/', type=str) if app.config.get('IGNORE_AUTH'): fake_id = 'anonymous_superuser' anonymous_superuser = models.User.query.get(fake_id) if not anonymous_superuser: anonymous_superuser = models.User( id=fake_id, email_address='[email protected]', superuser=True) db.session.add(anonymous_superuser) db.session.commit() login_user(anonymous_superuser) confirm_login() return redirect(next_url) # Inspired by: # http://stackoverflow.com/questions/9499286 # /using-google-oauth2-with-flask params = dict( response_type='code', client_id=app.config['GOOGLE_OAUTH2_CLIENT_ID'], redirect_uri=app.config['GOOGLE_OAUTH2_REDIRECT_URI'], scope=GOOGLE_OAUTH2_SCOPES, state=urllib.quote(next_url), ) if app.config['GOOGLE_OAUTH2_HOSTED_DOMAIN'] is not None: params['hd'] = app.config['GOOGLE_OAUTH2_HOSTED_DOMAIN'] target_url = '%s?%s' % ( GOOGLE_OAUTH2_AUTH_URL, urllib.urlencode(params)) logging.debug('Redirecting user to login at url=%r', target_url) return redirect(target_url) @app.route('/logout') @login_required def logout(): logout_user() return redirect(url_for('homepage')) @app.route('/oauth2callback') def login_auth(): # TODO: Handle when the 'error' parameter is present params = dict( code=request.args.get('code'), client_id=app.config['GOOGLE_OAUTH2_CLIENT_ID'], client_secret=app.config['GOOGLE_OAUTH2_CLIENT_SECRET'], redirect_uri=app.config['GOOGLE_OAUTH2_REDIRECT_URI'], grant_type='authorization_code' ) payload = urllib.urlencode(params) logging.debug('Posting for token to url=%r, payload=%r', GOOGLE_OAUTH2_TOKEN_URL, payload) fetch_request = urllib2.Request(GOOGLE_OAUTH2_TOKEN_URL, payload) conn = 
urllib2.urlopen(fetch_request, timeout=FETCH_TIMEOUT_SECONDS) data = conn.read() result_dict = json.loads(data) params = dict( access_token=result_dict['access_token'] ) payload = urllib.urlencode(params) target_url = '%s?%s' % (GOOGLE_OAUTH2_USERINFO_URL, payload) logging.debug('Fetching user info from url=%r', target_url) fetch_request = urllib2.Request(target_url) conn = urllib2.urlopen(fetch_request, timeout=FETCH_TIMEOUT_SECONDS) data = conn.read() result_dict = json.loads(data) logging.debug('Result user info dict: %r', result_dict) email_address = result_dict['email'] if not result_dict['verified_email']: abort(flask.Response('Your email address must be verified', 403)) user_id = '%s:%s' % (models.User.GOOGLE_OAUTH2, result_dict['id']) user = models.User.query.get(user_id) if not user: user = models.User(id=user_id) # Email address on the account may change, user ID will stay the same. # Do not allow the user to claim existing build invitations with their # old email address. if user.email_address != email_address: user.email_address = email_address user.last_seen = datetime.datetime.utcnow() db.session.add(user) db.session.commit() login_user(user) confirm_login() # Clear all flashed messages from the session on login. flask.get_flashed_messages() final_url = urllib.unquote(request.args.get('state')) logging.debug('User is logged in. Redirecting to url=%r', final_url) return redirect(final_url) @app.route('/whoami') @login_required def debug_login(): return render_template( 'whoami.html', user=current_user) def superuser_required(f): """Requires the requestor to be a super user.""" @functools.wraps(f) @login_required def wrapped(*args, **kwargs): if not (current_user.is_authenticated() and current_user.superuser): abort(403) return f(*args, **kwargs) return wrapped def can_user_access_build(param_name): """Determines if the current user can access the build ID in the request. Args: param_name: Parameter name to use for getting the build ID from the request. Will fetch from GET or POST requests. Returns: The build the user has access to. """ build_id = ( request.args.get(param_name, type=int) or request.form.get(param_name, type=int) or request.json[param_name]) if not build_id: logging.debug('Build ID in param_name=%r was missing', param_name) abort(400) ops = operations.UserOps(current_user.get_id()) build, user_is_owner = ops.owns_build(build_id) if not build: logging.debug('Could not find build_id=%r', build_id) abort(404) if current_user.is_authenticated() and not user_is_owner: # Assume the user should be able to access the build but can't because # the cache is out of date. This forces the cache to repopulate, any # outstanding user invitations to be completed, hopefully resulting in # the user having access to the build. 
ops.evict() claim_invitations(current_user) build, user_is_owner = ops.owns_build(build_id) if not user_is_owner: if current_user.is_authenticated() and current_user.superuser: pass elif request.method != 'GET': logging.debug('No way to log in user via modifying request') abort(403) elif build.public: pass elif current_user.is_authenticated(): logging.debug('User does not have access to this build') abort(flask.Response('You cannot access this build', 403)) else: logging.debug('Redirecting user to login to get build access') abort(login.unauthorized()) elif not login_fresh(): logging.debug('User login is old; forcing refresh') abort(login.needs_refresh()) return build def build_access_required(function_or_param_name): """Decorator ensures user has access to the build ID in the request. May be used in two ways: @build_access_required def my_func(build): ... @build_access_required('custom_build_id_param') def my_func(build): ... Always calls the given function with the models.Build entity as the first positional argument. """ def get_wrapper(param_name, f): @functools.wraps(f) def wrapped(*args, **kwargs): g.build = can_user_access_build(param_name) if not utils.is_production(): # Insert a sleep to emulate page loading in production. time.sleep(0.5) return f(*args, **kwargs) return wrapped if isinstance(function_or_param_name, basestring): return lambda f: get_wrapper(function_or_param_name, f) else: return get_wrapper('id', function_or_param_name) def _get_api_key_ops(): """Gets the operations.ApiKeyOps instance for the current request.""" auth_header = request.authorization if not auth_header: logging.debug('API request lacks authorization header') abort(flask.Response( 'API key required', 401, {'WWW-Authenticate': 'Basic realm="API key required"'})) return operations.ApiKeyOps(auth_header.username, auth_header.password) def current_api_key(): """Determines the API key for the current request. Returns: The ApiKey instance. """ if app.config.get('IGNORE_AUTH'): return models.ApiKey( id='anonymous_superuser', secret='', superuser=True) ops = _get_api_key_ops() api_key = ops.get() logging.debug('Authenticated as API key=%r', api_key.id) return api_key def can_api_key_access_build(param_name): """Determines if the current API key can access the build in the request. Args: param_name: Parameter name to use for getting the build ID from the request. Will fetch from GET or POST requests. Returns: (api_key, build) The API Key and the Build it has access to. """ build_id = ( request.args.get(param_name, type=int) or request.form.get(param_name, type=int) or request.json[param_name]) utils.jsonify_assert(build_id, 'build_id required') if app.config.get('IGNORE_AUTH'): api_key = models.ApiKey( id='anonymous_superuser', secret='', superuser=True) build = models.Build.query.get(build_id) utils.jsonify_assert(build is not None, 'build must exist', 404) else: ops = _get_api_key_ops() api_key, build = ops.can_access_build(build_id) return api_key, build def build_api_access_required(f): """Decorator ensures API key has access to the build ID in the request. Always calls the given function with the models.Build entity as the first positional argument. 
""" @functools.wraps(f) def wrapped(*args, **kwargs): g.api_key, g.build = can_api_key_access_build('build_id') return f(*args, **kwargs) return wrapped def superuser_api_key_required(f): """Decorator ensures only superuser API keys can request this function.""" @functools.wraps(f) def wrapped(*args, **kwargs): api_key = current_api_key() g.api_key = api_key utils.jsonify_assert( api_key.superuser, 'API key=%r must be a super user' % api_key.id, 403) return f(*args, **kwargs) return wrapped @app.route('/api_keys', methods=['GET', 'POST']) @fresh_login_required @build_access_required('build_id') def manage_api_keys(): """Page for viewing and creating API keys.""" build = g.build create_form = forms.CreateApiKeyForm() if create_form.validate_on_submit(): api_key = models.ApiKey() create_form.populate_obj(api_key) api_key.id = utils.human_uuid() api_key.secret = utils.password_uuid() save_admin_log(build, created_api_key=True, message=api_key.id) db.session.add(api_key) db.session.commit() logging.info('Created API key=%r for build_id=%r', api_key.id, build.id) return redirect(url_for('manage_api_keys', build_id=build.id)) create_form.build_id.data = build.id api_key_query = ( models.ApiKey.query .filter_by(build_id=build.id) .order_by(models.ApiKey.created.desc()) .limit(1000)) revoke_form_list = [] for api_key in api_key_query: form = forms.RevokeApiKeyForm() form.id.data = api_key.id form.build_id.data = build.id form.revoke.data = True revoke_form_list.append((api_key, form)) return render_template( 'view_api_keys.html', build=build, create_form=create_form, revoke_form_list=revoke_form_list) @app.route('/api_keys.revoke', methods=['POST']) @fresh_login_required @build_access_required('build_id') def revoke_api_key(): """Form submission handler for revoking API keys.""" build = g.build form = forms.RevokeApiKeyForm() if form.validate_on_submit(): api_key = models.ApiKey.query.get(form.id.data) if api_key.build_id != build.id: logging.debug('User does not have access to API key=%r', api_key.id) abort(403) api_key.active = False save_admin_log(build, revoked_api_key=True, message=api_key.id) db.session.add(api_key) db.session.commit() ops = operations.ApiKeyOps(api_key.id, api_key.secret) ops.evict() return redirect(url_for('manage_api_keys', build_id=build.id)) def claim_invitations(user): """Claims any pending invitations for the given user's email address.""" # See if there are any build invitations present for the user with this # email address. If so, replace all those invitations with the real user. invitation_user_id = '%s:%s' % ( models.User.EMAIL_INVITATION, user.email_address) invitation_user = models.User.query.get(invitation_user_id) if invitation_user: invited_build_list = list(invitation_user.builds) if not invited_build_list: return db.session.add(user) logging.debug('Found %d build admin invitations for id=%r, user=%r', len(invited_build_list), invitation_user_id, user) for build in invited_build_list: build.owners.remove(invitation_user) if not build.is_owned_by(user.id): build.owners.append(user) logging.debug('Claiming invitation for build_id=%r', build.id) save_admin_log(build, invite_accepted=True) else: logging.debug('User already owner of build. ' 'id=%r, build_id=%r', user.id, build.id) db.session.add(build) db.session.delete(invitation_user) db.session.commit() # Re-add the user to the current session so we can query with it. 
db.session.add(current_user) @app.route('/admins', methods=['GET', 'POST']) @fresh_login_required @build_access_required('build_id') def manage_admins(): """Page for viewing and managing build admins.""" build = g.build # Do not show cached data db.session.add(build) db.session.refresh(build) add_form = forms.AddAdminForm() if add_form.validate_on_submit(): invitation_user_id = '%s:%s' % ( models.User.EMAIL_INVITATION, add_form.email_address.data) invitation_user = models.User.query.get(invitation_user_id) if not invitation_user: invitation_user = models.User( id=invitation_user_id, email_address=add_form.email_address.data) db.session.add(invitation_user) db.session.add(build) db.session.add(invitation_user) db.session.refresh(build, lockmode='update') build.owners.append(invitation_user) save_admin_log(build, invited_new_admin=True, message=invitation_user.email_address) db.session.commit() logging.info('Added user=%r as owner to build_id=%r', invitation_user.id, build.id) return redirect(url_for('manage_admins', build_id=build.id)) add_form.build_id.data = build.id revoke_form_list = [] for user in build.owners: form = forms.RemoveAdminForm() form.user_id.data = user.id form.build_id.data = build.id form.revoke.data = True revoke_form_list.append((user, form)) return render_template( 'view_admins.html', build=build, add_form=add_form, revoke_form_list=revoke_form_list) @app.route('/admins.revoke', methods=['POST']) @fresh_login_required @build_access_required('build_id') def revoke_admin(): """Form submission handler for revoking admin access to a build.""" build = g.build form = forms.RemoveAdminForm() if form.validate_on_submit(): user = models.User.query.get(form.user_id.data) if not user: logging.debug('User being revoked admin access does not exist.' 'id=%r, build_id=%r', form.user_id.data, build.id) abort(400) if user == current_user: logging.debug('User trying to remove themself as admin. ' 'id=%r, build_id=%r', user.id, build.id) abort(400) db.session.add(build) db.session.add(user) db.session.refresh(build, lockmode='update') db.session.refresh(user, lockmode='update') user_is_owner = build.owners.filter_by(id=user.id) if not user_is_owner: logging.debug('User being revoked admin access is not owner. 
' 'id=%r, build_id=%r.', user.id, build.id) abort(400) build.owners.remove(user) save_admin_log(build, revoked_admin=True, message=user.email_address) db.session.commit() operations.UserOps(user.get_id()).evict() return redirect(url_for('manage_admins', build_id=build.id)) def save_admin_log(build, **kwargs): """Saves an action to the admin log.""" message = kwargs.pop('message', None) release = kwargs.pop('release', None) run = kwargs.pop('run', None) if not len(kwargs) == 1: raise TypeError('Must specify a LOG_TYPE argument') log_enum = kwargs.keys()[0] log_type = getattr(models.AdminLog, log_enum.upper(), None) if not log_type: raise TypeError('Bad log_type argument: %s' % log_enum) if current_user.is_anonymous(): user_id = None else: user_id = current_user.get_id() log = models.AdminLog( build_id=build.id, log_type=log_type, message=message, user_id=user_id) if release: log.release_id = release.id if run: log.run_id = run.id log.release_id = run.release_id db.session.add(log) @app.route('/activity') @fresh_login_required @build_access_required('build_id') def view_admin_log(): """Page for viewing the log of admin activity.""" build = g.build # TODO: Add paging log_list = ( models.AdminLog.query .filter_by(build_id=build.id) .order_by(models.AdminLog.created.desc()) .all()) return render_template( 'view_admin_log.html', build=build, log_list=log_list)
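# --- Illustrative usage (not part of the original module) -------------------
# superuser_required is defined above but not applied in this excerpt; a view
# protected by it would look like the sketch below. The route path and
# template name are hypothetical.
#
#   @app.route('/admin/site_settings')
#   @superuser_required
#   def site_settings():
#       return render_template('site_settings.html')
# ----------------------------------------------------------------------------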
#!/usr/bin/env python # # This script defines variables and functions which are needed for # plotting purposes and for using WriteRegularization.py which forms a # regularization matrix by collocation # # Variables in all caps are required for other scripts to run and # this script must also define the slip and fluidity basis functions # from __future__ import division from spectral.bspline import augmented_knots from spectral.bspline import natural_knots from spectral.bspline import bspline_nd from modest import linear_to_array_index from modest import Perturb import transform as trans import pickle import numpy as np ## Define parameters for slip basis function geometry ###################################################################### FAULT_ANCHOR = [[-116.0,32.0]] FAULT_LENGTH = [50000.0] FAULT_WIDTH = [20000.0] FAULT_STRIKE = [0.0] FAULT_DIP = [60.0] FAULT_NLENGTH = [10] FAULT_NWIDTH = [4] FAULT_ORDER = [[0,0]] FLUIDITY_ANCHOR = [-119.25,35.0] FLUIDITY_STRIKE = 90.0 FLUIDITY_LENGTH = 600000.0 FLUIDITY_WIDTH = 600000.0 FLUIDITY_THICKNESS = 150000.0 FLUIDITY_NLENGTH = 1 FLUIDITY_NWIDTH = 1 FLUIDITY_NTHICKNESS = 5 FLUIDITY_ORDER = [0,0,3] ###################################################################### FAULT_N = sum(l*w for l,w in zip(FAULT_NLENGTH,FAULT_NWIDTH)) FLUIDITY_N = FLUIDITY_NLENGTH*FLUIDITY_NWIDTH*FLUIDITY_NTHICKNESS FAULT_SEGMENTS = len(FAULT_ANCHOR) FAULT_TRANSFORMS = [] FAULT_KNOTS = [] BASEMAP = pickle.load(open('basemap.pkl','r')) # find knots for faults for d in range(FAULT_SEGMENTS): xc,yc = BASEMAP(*FAULT_ANCHOR[d]) t = trans.point_stretch([FAULT_LENGTH[d],FAULT_WIDTH[d],1.0]) t += trans.point_rotation_x(FAULT_DIP[d]*np.pi/180) t += trans.point_rotation_z(np.pi/2.0 - FAULT_STRIKE[d]*np.pi/180) t += trans.point_translation([xc,yc,0.0]) # create knots defining B-splines for slip on a rectangle x = [0,1] # and y = [-1,0] fault_knots_x = natural_knots(FAULT_NLENGTH[d], FAULT_ORDER[d][0],side='both') fault_knots_y = natural_knots(FAULT_NWIDTH[d], FAULT_ORDER[d][1],side='both') - 1.0 FAULT_TRANSFORMS += [t] FAULT_KNOTS += [(fault_knots_x,fault_knots_y)] # find knots for fluidity xc,yc = BASEMAP(*FLUIDITY_ANCHOR) t = trans.point_stretch([FLUIDITY_LENGTH,FLUIDITY_WIDTH,FLUIDITY_THICKNESS]) t += trans.point_rotation_z(np.pi/2.0 - FLUIDITY_STRIKE*np.pi/180) t += trans.point_translation([xc,yc,0.0]) fluidity_knots_x = natural_knots(FLUIDITY_NLENGTH, FLUIDITY_ORDER[0],side='both') fluidity_knots_y = natural_knots(FLUIDITY_NWIDTH, FLUIDITY_ORDER[1],side='both') - 1.0 fluidity_knots_z = natural_knots(FLUIDITY_NTHICKNESS, FLUIDITY_ORDER[2],side='none') - 1.0 FLUIDITY_TRANSFORM = t FLUIDITY_KNOTS = (fluidity_knots_x,fluidity_knots_y,fluidity_knots_z) def slip(x,coeff,segment=None,diff=None): ''' takes positions, x, and slip coefficients, coeff, and returns the vaues for slip. The segment key word is specified to only use coefficients corresponding to the specified fault segment. 
if no segment is specified then all coefficients will be used ''' minN = 0 s = segment out = np.zeros(len(x)) assert len(coeff) == FAULT_N, ( 'coefficient list must have length %s' % FAULT_N) if s is None: for d in range(FAULT_SEGMENTS): t = FAULT_TRANSFORMS[d].inverse() fx = t(x)[:,[0,1]] shape = FAULT_NLENGTH[d],FAULT_NWIDTH[d] order = FAULT_ORDER[d] maxN = minN + np.prod(shape) for n in range(minN,maxN): idx = linear_to_array_index(n-minN,shape) out += coeff[n]*bspline_nd(fx,FAULT_KNOTS[d],idx,order,diff=diff) minN += np.prod(shape) else: for d in range(s): shape = FAULT_NLENGTH[d],FAULT_NWIDTH[d] maxN = minN + np.prod(shape) minN += np.prod(shape) shape = FAULT_NLENGTH[s],FAULT_NWIDTH[s] maxN = minN + np.prod(shape) t = FAULT_TRANSFORMS[s].inverse() fx = t(x)[:,[0,1]] order = FAULT_ORDER[s] for n in range(minN,maxN): idx = linear_to_array_index(n-minN,shape) out += coeff[n]*bspline_nd(fx,FAULT_KNOTS[s],idx,order,diff=diff) minN += np.prod(shape) return out def fluidity(x,coeff,diff=None): out = np.zeros(len(x)) t = FLUIDITY_TRANSFORM.inverse() fx = t(x) shape = FLUIDITY_NLENGTH,FLUIDITY_NWIDTH,FLUIDITY_NTHICKNESS order = FLUIDITY_ORDER for n in range(FLUIDITY_N): idx = linear_to_array_index(n,shape) out += coeff[n]*bspline_nd(fx,FLUIDITY_KNOTS,idx,order,diff=diff) return out if __name__ == '__main__': from tplot.xsection import XSection import mayavi.mlab bm = BASEMAP sta_array = np.loadtxt('stations.txt',dtype=str) sta_pos = np.array(sta_array[:,[1,2]],dtype=float) sta_pos_x,sta_pos_y = bm(sta_pos[:,0],sta_pos[:,1]) fluidity_transforms = [] x,y = bm(*FLUIDITY_ANCHOR[:2]) length = FLUIDITY_LENGTH width = FLUIDITY_WIDTH thickness = FLUIDITY_THICKNESS t = trans.point_stretch([FLUIDITY_LENGTH, FLUIDITY_THICKNESS, 1.0]) t += trans.point_rotation_x(np.pi/2.0) t += trans.point_translation([0.0,-width/2.0,0.0]) t += trans.point_rotation_z(np.pi/2.0 - FLUIDITY_STRIKE*np.pi/180) t += trans.point_translation([x,y,0.0]) fluidity_transforms += [t] t = trans.point_stretch([FLUIDITY_WIDTH, FLUIDITY_THICKNESS, 1.0]) t += trans.point_rotation_x(np.pi/2.0) t += trans.point_rotation_z(-np.pi/2.0) t += trans.point_translation([FLUIDITY_LENGTH/2.0, 0.0, 0.0]) t += trans.point_rotation_z(np.pi/2.0 - FLUIDITY_STRIKE*np.pi/180) t += trans.point_translation([x,y,0.0]) fluidity_transforms += [t] xs1 = XSection(fluidity, f_args=(np.random.random(FLUIDITY_N),), base_square_y=(-1,0), transforms = fluidity_transforms, clim = (0,1)) xs2 = XSection(fluidity, f_args=(np.random.random(FLUIDITY_N),), base_square_y=(-1,0), transforms = FAULT_TRANSFORMS) xs1.draw() xs2.draw(color=(0.2,0.2,0.2),opacity=0.5) mayavi.mlab.points3d(sta_pos_x,sta_pos_y,0*sta_pos[:,1],scale_factor=10000) xs1.view() coeff = np.random.random(FAULT_N) xs1 = XSection(slip, f_args=(coeff,), base_square_y=(-1,0), transforms = FAULT_TRANSFORMS, clim=(0,1)) xs1.draw() xs1.view() coeff = np.random.random(FLUIDITY_N)
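# --- Illustrative note (not part of the original script) --------------------
# The loops in slip() and fluidity() map a flat coefficient index onto a
# tensor-product basis index. Assuming linear_to_array_index() uses row-major
# (C) ordering, it behaves like numpy.unravel_index:
#
#   shape = (FLUIDITY_NLENGTH, FLUIDITY_NWIDTH, FLUIDITY_NTHICKNESS)
#   idx = np.unravel_index(n, shape)  # presumed equivalent for 0 <= n < FLUIDITY_N
#
# so FLUIDITY_N == np.prod(shape) coefficients cover every (i, j, k) basis term.
# ----------------------------------------------------------------------------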
# modified from https://github.com/goberoi/cloudy_vision import cv2 from jinja2 import FileSystemLoader, Environment import json import numpy import time import os import pprint import shutil import time import vendors.google import vendors.microsoft import vendors.clarifai_ import vendors.ibm import vendors.cloudsight_ SETTINGS = None def settings(name): """Fetch a settings parameter.""" # Initialize settings if necessary. global SETTINGS if SETTINGS is None: # Change this dict to suit your taste. SETTINGS = { 'api_keys_filepath' : './api_keys.json', 'input_images_dir' : 'input_images', 'output_dir' : 'output', 'static_dir' : 'static', 'output_image_height' : 200, 'vendors' : { # 'google' : vendors.google, # 'msft' : vendors.microsoft, # 'clarifai' : vendors.clarifai_ # 'ibm' : vendors.ibm, 'cloudsight' : vendors.cloudsight_ } } # Load API keys with open(SETTINGS['api_keys_filepath']) as data_file: SETTINGS['api_keys'] = json.load(data_file) return SETTINGS[name] def log_status(filepath, vendor_name, msg): filename = os.path.basename(filepath) print("%s -> %s" % ((filename + ", " + vendor_name).ljust(40), msg)) def resize_and_save(input_image_filepath, output_image_filepath): image = cv2.imread(input_image_filepath) height = image.shape[0] width = image.shape[1] aspect_ratio = float(width) / float(height) new_height = settings('output_image_height') new_width = int(aspect_ratio * new_height) output_image = cv2.resize(image, (new_width, new_height)) cv2.imwrite(output_image_filepath, output_image) def render_from_template(directory, template_name, **kwargs): loader = FileSystemLoader(directory) env = Environment(loader=loader) template = env.get_template(template_name) return template.render(**kwargs) def process_all_images(): image_results = [] # Create the output directory if not os.path.exists(settings('output_dir')): os.makedirs(settings('output_dir')) # Loop through all input images. for filename in os.listdir(settings('input_images_dir')): # Only process files that have these image extensions. if not filename.endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')): continue # Create a full path so we can read these files. filepath = os.path.join(settings('input_images_dir'), filename) # Create an output object for the image image_result = { 'input_image_filepath' : filepath, 'output_image_filepath' : filename, 'vendors' : [] } image_results.append(image_result) # Walk through all vendor APIs to call. for vendor_name, vendor_module in sorted(settings('vendors').iteritems(), reverse=True): # Figure out filename to store and retrive cached JSON results. output_json_filename = filename + "." + vendor_name + ".json" output_json_path = os.path.join(settings('output_dir'), output_json_filename) # And where to store the output image output_image_filepath = os.path.join(settings('output_dir'), filename) # Check if the call is already cached. # if os.path.isfile(output_json_path): # # If so, read the result from the .json file stored in the output dir. # log_status(filepath, vendor_name, "skipping API call, already cached") # with open(output_json_path, 'r') as infile: # raw_api_result = infile.read() # else: # If not, make the API call for this particular vendor. 
log_status(filepath, vendor_name, "calling API") raw_api_result = vendor_module.call_vision_api(filepath, settings('api_keys')) print(raw_api_result) # And cache the result in a .json file log_status(filepath, vendor_name, "success, storing result in %s" % output_json_path) with open(output_json_path, 'w') as outfile: outfile.write(raw_api_result) # Resize the original image and write to an output filename log_status(filepath, vendor_name, "writing output image in %s" % output_image_filepath) resize_and_save(filepath, output_image_filepath) # Sleep so we avoid hitting throttling limits time.sleep(1) # Parse the JSON result we fetched (via API call or from cache) api_result = json.loads(raw_api_result) standardized_result = vendor_module.get_standardized_result(api_result) image_result['vendors'].append({ 'api_result' : api_result, 'vendor_name' : vendor_name, 'standardized_result' : standardized_result, 'output_json_filename' : output_json_filename }) # Render HTML file with all results. output_html = render_from_template('.', os.path.join(settings('static_dir'), 'template.html'), image_results=image_results) # Write HTML output. output_html_filepath = os.path.join(settings('output_dir'), 'output.html') with open(output_html_filepath, 'w') as output_html_file: output_html_file.write(output_html) # print vendors.clarifai_.get_acces_token(settings('api_keys')) def run(): index=0 video_capture = cv2.VideoCapture(0) video_capture.set(3,640) video_capture.set(4,480) # faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml") while(True): # Capture frame-by-frame ret, frame = video_capture.read() # cv2.imshow('Webcam', frame) # if cv2.waitKey(1) & 0xFF == ord('p'): # clickPhoto = True # # if ((cv2.waitKey(1) & 0xFF )== ord('p')): # print("ewqreqwr") # cv2.imwrite('testImage %s.jpg' %(str(index)), crop_img) # index+=1 # process_all_images() cv2.imwrite('input_images/testimage.jpg', frame) t1=time.time() process_all_images() print("time for frame = ",time.time()-t1) index+=1 # if(index<4): # time.sleep(3) # continue # break # cv2.waitKey broken systemwide locally # if (cv2.waitKey(1) & 0xFF) == ord('q'): # break # When everything done, release the capture video_capture.release() cv2.destroyAllWindows() if __name__ == "__main__": run()
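# --- Illustrative sketch (not part of the original script) ------------------
# The minimal interface process_all_images() expects from each entry in
# settings('vendors'): a call_vision_api() returning the raw JSON string and
# a get_standardized_result() reducing it to a common form. Everything below
# is a hypothetical stand-in, not a real vendor integration.
class _ExampleVendorModule(object):
    @staticmethod
    def call_vision_api(image_filename, api_keys):
        # A real module would POST the image to the vendor's API here.
        return json.dumps({'labels': [{'name': 'example', 'score': 0.99}]})

    @staticmethod
    def get_standardized_result(api_result):
        # Reduce the vendor-specific payload to a {description: score} dict.
        return {item['name']: item['score'] for item in api_result.get('labels', [])}
# ----------------------------------------------------------------------------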
from __future__ import division import os import sys import time from multiprocessing import Process, Pipe, Pool from math import pi import numpy as np import matplotlib.pyplot as plt from pylab import get_current_fig_manager import networkx as nx import win32api import win32con from screen_capture.localize_map import LocalizeMap from screen_capture.capture import Capture, find_best_image from planning.astar.local_graph import plan_path from smoothing.gd import smooth_graph, graph_to_path from control.robot_control import particles, robot from utils import root from planning.astar.global_map import (plot_map, GlobalMap, MIN_UNCONTRAINED_PENALTY) key_map = { win32con.VK_SPACE: 'break', win32con.VK_UP: 'up', win32con.VK_DOWN: 'down', win32con.VK_RIGHT: 'right', win32con.VK_LEFT: 'left', } inv_key_map = dict((v,k) for k, v in key_map.iteritems()) gtime = time.time() class AsyncFactory: def __init__(self, func, cb_func): self.func = func self.cb_func = cb_func self.pool = Pool(processes=4) def call(self,*args, **kwargs): return self.pool.apply_async(self.func, args, kwargs, self.cb_func) def wait(self): self.pool.close() self.pool.join() def _key_down(key): #print key_map[key], "pressed at", time.time() - gtime win32api.keybd_event(key, 0, 0, 0) def _key_up(key): #print key_map[key], "lifted at", time.time() -gtime win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP ,0) def press(cmd, timeout): key = inv_key_map[cmd] _key_down(key) #print "PID: %d \t Value: %d \t Sleep: %d" % (os.getpid(), x ,sleep_duration) time.sleep(timeout) return key def cb_func(key): _key_up(key) class NavigateProcess(Process): def __init__(self, connec, *args, **kwargs): self.connec = connec map_filename = os.path.join(root, 'flash', 'fft2', 'processed', 'aligned_localization_data_map.png') car_filename = os.path.join(root, 'flash', 'fft2', 'export', 'images', '445.png') self.mapper = LocalizeMap(map_filename, car_filename) filename = os.path.join(root, 'flash', 'fft2', 'processed', 'level1_start.png') self.c = Capture(filename) #default starting value self.start_pos = [2650, 2650] #level1 #self.goal_pos = [1900, 400] #leve2 self.goal_pos = [1252, 1476] #from twiddle weight_data = 1.1 weight_smooth = 0.2 self.p_gain = .5 self.d_gain = 0.0 self.steering_noise = 5 self.distance_noise = 5 self.measurement_noise = 0.0005 self.speed = 2 #planning print "planning..." graph_path = plan_path(self.start_pos, self.goal_pos) #extract points from graph path_pos = nx.get_node_attributes(graph_path, 'pos') #smooth print "smoothing..." 
sg = smooth_graph(graph_path, self.start_pos, self.goal_pos, False, weight_data, weight_smooth) #extract points from ad smoothed graph sg_pos = nx.get_node_attributes(sg, 'pos') #convert graph to spath self.spath = graph_to_path(sg) #plot smoothed path on a graph nx.draw(sg, sg_pos, node_size=5, edge_color='r') #self.async_steering = AsyncFactory(press, cb_func) Process.__init__(self, *args, **kwargs) def run(self): async_steering = AsyncFactory(press, cb_func) prev_map_box = None mg = nx.DiGraph() myrobot = robot() template = self.c.snap_gray() #map_box = self.mapper.localize(template, None) #center = ((map_box[0] + map_box[2])/2, (map_box[1] + map_box[3])/2) map_box, center = self.mapper.extended_localize(template, None) #this is approximate sensor measurement self.start_pos = (center[0], center[1]) myrobot.set(self.start_pos[0], self.start_pos[1], pi/2) mg.add_node(0, pos=(myrobot.x, myrobot.y)) myrobot.set_noise(0,0,0) pfilter = particles(myrobot.x, myrobot.y, myrobot.orientation, self.steering_noise, self.distance_noise, self.measurement_noise) cte = 0.0 err = 0.0 N = 0 fps = 25 index = 0 # index into the path async_res = [] while not myrobot.check_goal(self.goal_pos): start_time = time.time() diff_cte = -cte # ---------------------------------------- # compute the CTE template = self.c.snap_gray() map_box, center = self.mapper.extended_localize(template, prev_map_box) prev_map_box = map_box (x0, y0, x1, y1) = map_box (x, y) = (center[0], center[1]) estimate = (x, y) #find the rigt spath while True: x1, y1 = self.spath[index] Rx = x - x1 Ry = y - y1 x2, y2 = self.spath[index + 1] dx = x2 - x1 dy = y2 - y1 u = abs(Rx*dx + Ry*dy)/(dx*dx + dy*dy) if u > 1 and index < (len(self.spath) - 2): index +=1 print "index change", index else: spath = (x1, y1, x2, y2) break cte = (Ry * dx - Rx * dy) / (dx * dx + dy * dy) diff_cte += cte steer = - self.p_gain * cte - self.d_gain * diff_cte myrobot, cmds = myrobot.move(steer, self.speed, real=True, fps = fps) print cte #wait for the previous commands to complete before issuing new ones for r in async_res: #retyrn when the call is completed r.ready() #async_steering.wait() async_res = [] for cmd, timeout in cmds: print cte, steer, cmd[0], fps async_res.append(async_steering.call(cmd, timeout)) pending = True #pfilter.move(steer, self.speed) #sense #pfilter.sense(Z) err += (cte ** 2) N += 1 robot_pos = (myrobot.x, myrobot.y) #mg.add_node(N, pos=(myrobot.x, myrobot.y)) #mg.add_edge(N-1, N) #send update to matplotlib time_pos = (time.time(), map_box, estimate, robot_pos, spath) self.connec.send(time_pos) end_time = time.time() #fps fps = 1/(end_time-start_time) #print "%2d frames per sec\r" % fps, time.sleep(0.01) def main(): plot_map() thismanager = get_current_fig_manager() thismanager.window.wm_geometry("+700+0") plt.gca().set_title("Running...") plt.ion() conn1, conn2 = Pipe() data_stream = NavigateProcess(conn1) data_stream.start() #plt.gca().set_xlim([0, 2800]) #plt.gca().set_ylim([0, 2800]) map_box = None while True: if not(conn2.poll(0.1)): if not(data_stream.is_alive()): break else: continue (sent_time, map_box, estimate, robot_pos, spath) = conn2.recv() while (time.time() - sent_time) > 1/20: #we are getting behind by more then a sec (sent_time, map_box, estimate, robot_pos, spath) = conn2.recv() if map_box is not None: (x0, y0, x1, y1) = map_box plt.gca().set_xlim([x0, x1]) plt.gca().set_ylim([y1, y0]) #new_position = (max_loc[0] + w/2, max_loc[1] + h/2) plt.scatter( [(x0 + x1)/2], [(y0 + y1)/2],) plt.scatter( [robot_pos[0]], [robot_pos[1]], 
color='red') plt.scatter( [estimate[0]], [estimate[1]], color='green') plt.plot([spath[0], spath[2]], [spath[1], spath[3]], color = 'green') #plt.plot([pt[0], new_pt[0]], [pt[1], new_pt[1]], "bs:") plt.pause(0.001) map_box = (x0, y0, x1, y1) plt.gca().set_title("Terminated.") plt.draw() plt.show(block=True) if __name__ == '__main__': main()
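# --- Illustrative sketch (not part of the original script) ------------------
# The cross-track error and PD steering rule used inside NavigateProcess.run(),
# pulled out as a stand-alone helper so it can be checked without the screen
# capture and keyboard control. Gains and the example values are hypothetical.
def _cte_and_steer(pos, seg_start, seg_end, prev_cte, p_gain=0.5, d_gain=0.0):
    """Return (cte, steer) for position pos against segment seg_start->seg_end."""
    Rx = pos[0] - seg_start[0]
    Ry = pos[1] - seg_start[1]
    dx = seg_end[0] - seg_start[0]
    dy = seg_end[1] - seg_start[1]
    cte = (Ry * dx - Rx * dy) / float(dx * dx + dy * dy)
    steer = -p_gain * cte - d_gain * (cte - prev_cte)
    return cte, steer

# Example: _cte_and_steer((1.0, 2.0), (0.0, 0.0), (10.0, 0.0), prev_cte=0.0)
# gives cte = 0.2 and steer = -0.1 with the default gains.
# ----------------------------------------------------------------------------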
# Copyright 2014 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Paul Michali, Cisco Systems, Inc. """Test module for interim implementation - to be removed later. This tests using an INI file to obtain Cisco CSR router information for IPSec site-to-site connections. Once the Cisco L3 router plugin blueprint has been up-streamed, this can be removed and production code switched to use the L3 plugin methods for: get_host_for_router() get_active_routers_for_host() TODO(pcm): remove module, when Cisco L3 router plugin is up-streamed. """ import os import tempfile import mock from oslo.config import cfg from neutron import context as ctx from neutron.openstack.common import uuidutils from neutron.services.vpn.device_drivers import ( cisco_csr_rest_client as csr_client) from neutron.services.vpn.service_drivers import ( cisco_cfg_loader as cfg_loader) from neutron.tests import base _uuid = uuidutils.generate_uuid FAKE_ROUTER_ID = _uuid() CISCO_GET_ROUTER_IP = ('neutron.services.vpn.service_drivers.' 'cisco_cfg_loader._get_external_ip_for_router') CISCO_GET_ROUTER_ID = ('neutron.services.vpn.service_drivers.' 'cisco_cfg_loader._get_router_id_via_external_ip') def create_tempfile(contents): (fd, path) = tempfile.mkstemp(prefix='test', suffix='.conf') try: os.write(fd, contents.encode('utf-8')) finally: os.close(fd) return path class TestCiscoCsrServiceDriverConfigLoading(base.BaseTestCase): def test_loading_csr_configuration(self): """Ensure that Cisco CSR configs can be loaded from config files.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') expected = {'3.2.1.1': {'rest_mgmt_ip': '10.20.30.1', 'tunnel_ip': '3.2.1.3', 'username': 'me', 'password': 'secret', 'host': 'compute-node', 'tunnel_if': 'GigabitEthernet3', 'timeout': 5.0}} csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual(expected, csrs_found) def test_loading_config_without_timeout(self): """Cisco CSR config without timeout will use default timeout.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n') expected = {'3.2.1.1': {'rest_mgmt_ip': '10.20.30.1', 'tunnel_ip': '3.2.1.3', 'username': 'me', 'password': 'secret', 'host': 'compute-node', 'tunnel_if': 'GigabitEthernet3', 'timeout': csr_client.TIMEOUT}} csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual(expected, csrs_found) def test_skip_loading_duplicate_csr_configuration(self): """Failure test that duplicate configurations are ignored.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 
5.0\n' '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 5.5.5.3\n' 'tunnel_ip = 3.2.1.6\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n') expected = {'3.2.1.1': {'rest_mgmt_ip': '10.20.30.1', 'tunnel_ip': '3.2.1.3', 'username': 'me', 'password': 'secret', 'host': 'compute-node', 'tunnel_if': 'GigabitEthernet3', 'timeout': 5.0}} csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual(expected, csrs_found) def test_fail_loading_config_with_invalid_timeout(self): """Failure test of invalid timeout in config info.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = yes\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_fail_loading_config_missing_required_info(self): """Failure test of config missing required info.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:1.1.1.0]\n' # No rest_mgmt 'tunnel_ip = 1.1.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n' '[CISCO_CSR_REST:2.2.2.0]\n' 'rest_mgmt = 10.20.30.2\n' # No tunnel_ip 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n' '[CISCO_CSR_REST:3.3.3.0]\n' 'rest_mgmt = 10.20.30.3\n' 'tunnel_ip = 3.3.3.3\n' # No username 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n' '[CISCO_CSR_REST:4.4.4.0]\n' 'rest_mgmt = 10.20.30.4\n' 'tunnel_ip = 4.4.4.4\n' 'username = me\n' # No password 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n' '[CISCO_CSR_REST:5.5.5.0]\n' 'rest_mgmt = 10.20.30.5\n' 'tunnel_ip = 5.5.5.5' 'username = me\n' 'password = secret\n' # No host 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n' '[CISCO_CSR_REST:6.6.6.0]\n' 'rest_mgmt = 10.20.30.6\n' 'tunnel_ip = 6.6.6.6' 'username = me\n' 'password = secret\n' 'host = compute-node\n' # No tunnel_if 'timeout = 5.0\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_fail_loading_config_with_invalid_router_id(self): """Failure test of config with invalid rotuer ID.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:4.3.2.1.9]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 4.3.2.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_fail_loading_config_with_invalid_mgmt_ip(self): """Failure test of configuration with invalid management IP address.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 1.1.1.1.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_fail_loading_config_with_invalid_tunnel_ip(self): """Failure test of configuration with invalid tunnel IP address.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 1.1.1.1\n' 'tunnel_ip = 3.2.1.4.5\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') csrs_found = 
cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_failure_no_configurations_entries(self): """Failure test config file without any CSR definitions.""" cfg_file = create_tempfile('NO CISCO SECTION AT ALL\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_failure_no_csr_configurations_entries(self): """Failure test config file without any CSR definitions.""" cfg_file = create_tempfile('[SOME_CONFIG:123]\n' 'username = me\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_missing_config_value(self): """Failure test of config file missing a value for attribute.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = \n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) def test_ignores_invalid_attribute_in_config(self): """Test ignoring of config file with invalid attribute.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 1.1.1.1\n' 'bogus = abcdef\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 15.5\n') expected = {'3.2.1.1': {'rest_mgmt_ip': '1.1.1.1', 'tunnel_ip': '3.2.1.3', 'username': 'me', 'password': 'secret', 'host': 'compute-node', 'tunnel_if': 'GigabitEthernet3', 'timeout': 15.5}} csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual(expected, csrs_found) def test_invalid_management_interface(self): """Failure test of invalid management interface name.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 1.1.1.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = compute-node\n' 'tunnel_if = GigabitEthernet9\n' 'timeout = 5.0\n') csrs_found = cfg_loader.get_available_csrs_from_config([cfg_file]) self.assertEqual({}, csrs_found) class TestCiscoCsrRouterInfo(base.BaseTestCase): def setUp(self): super(TestCiscoCsrRouterInfo, self).setUp() self.context = ctx.get_admin_context() def test_find_host_for_router(self): """Look up host in INI file for a router.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet1\n' 'mgmt_vlan = 100\n' 'timeout = 5.0\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_IP, return_value='3.2.1.1').start() self.assertEqual('ubuntu', cfg_loader.get_host_for_router(self.context, FAKE_ROUTER_ID)) def test_failed_to_find_host_as_no_routers_in_ini(self): """Fail to find host, as no router info in INI file.""" cfg_file = create_tempfile('\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_IP, return_value='5.5.5.5').start() self.assertEqual('', cfg_loader.get_host_for_router(self.context, FAKE_ROUTER_ID)) def test_failed_no_matching_router_to_obtain_host(self): """Fail to find INI info for router provided.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_IP, 
return_value='5.5.5.5').start() self.assertEqual('', cfg_loader.get_host_for_router(self.context, FAKE_ROUTER_ID)) def test_failed_to_find_router_ip(self): """Fail to lookup router IP, preventing search in INI file.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_IP, return_value=None).start() self.assertEqual('', cfg_loader.get_host_for_router(self.context, FAKE_ROUTER_ID)) def _get_router_id_from_external_ip(self, context, ip): if ip == '3.2.1.1': return '123' elif ip == '4.3.2.1': return '456' def test_get_one_active_router_for_host(self): """Get router info from INI for host specified.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.3\n' 'username = me\n' 'password = secret\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet2\n' 'timeout = 5.0\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_ID, side_effect=self._get_router_id_from_external_ip).start() expected = { 'id': '123', 'hosting_device': { 'management_ip_address': '10.20.30.1', 'credentials': {'username': 'me', 'password': 'secret'} }, 'tunnel_if': 'GigabitEthernet2', 'tunnel_ip': '3.2.1.3' } routers = cfg_loader.get_active_routers_for_host(self.context, "ubuntu") self.assertEqual([expected], routers) def test_get_two_active_routers_for_host(self): """Get info for two routers, from INI file, for host specified.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:3.2.1.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 3.2.1.1\n' 'username = me\n' 'password = secret\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet2\n' 'timeout = 5.0\n' '[CISCO_CSR_REST:4.3.2.1]\n' 'rest_mgmt = 10.20.30.2\n' 'tunnel_ip = 4.3.2.1\n' 'username = you\n' 'password = insecure\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_ID, side_effect=self._get_router_id_from_external_ip).start() expected_a = { 'id': '123', 'hosting_device': { 'management_ip_address': '10.20.30.1', 'credentials': {'username': 'me', 'password': 'secret'} }, 'tunnel_if': 'GigabitEthernet2', 'tunnel_ip': '3.2.1.1' } expected_b = { 'id': '456', 'hosting_device': { 'management_ip_address': '10.20.30.2', 'credentials': {'username': 'you', 'password': 'insecure'} }, 'tunnel_if': 'GigabitEthernet3', 'tunnel_ip': '4.3.2.1' } routers = cfg_loader.get_active_routers_for_host(self.context, "ubuntu") sorted_routers = sorted(routers, key=lambda key: key['id']) self.assertEqual([expected_a, expected_b], sorted_routers) def test_failure_to_find_routers_for_host(self): """Fail to find a router in INI with matching host name.""" routers = cfg_loader.get_active_routers_for_host(self.context, "bogus") self.assertEqual([], routers) def test_failure_to_lookup_router_id_for_host(self): """Fail to get router UUID for router in INI matching host name.""" cfg_file = create_tempfile( '[CISCO_CSR_REST:6.6.6.1]\n' 'rest_mgmt = 10.20.30.1\n' 'tunnel_ip = 6.6.6.1\n' 'username = me\n' 'password = secret\n' 'host = ubuntu\n' 'tunnel_if = GigabitEthernet3\n' 'timeout = 5.0\n') cfg.CONF.set_override('config_file', [cfg_file]) mock.patch(CISCO_GET_ROUTER_ID, side_effect=self._get_router_id_from_external_ip).start() routers = cfg_loader.get_active_routers_for_host(self.context, "ubuntu") 
self.assertEqual([], routers)
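# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the test module above, and not the
# actual cisco_cfg_loader implementation): roughly how one CISCO_CSR_REST
# INI section could be turned into the dict shape the tests assert.  The
# helper names parse_csr_section/_is_ipv4 and the default timeout of 5.0 are
# assumptions for illustration; the production loader is built on oslo.config
# and csr_client.TIMEOUT, and it additionally validates tunnel_if (omitted
# here for brevity).
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

REQUIRED_FIELDS = ('rest_mgmt', 'tunnel_ip', 'username', 'password',
                   'host', 'tunnel_if')


def _is_ipv4(addr):
    """Small dotted-quad check, enough for the failure cases tested above."""
    parts = addr.split('.')
    if len(parts) != 4:
        return False
    try:
        return all(0 <= int(p) <= 255 for p in parts)
    except ValueError:
        return False


def parse_csr_section(parser, section, default_timeout=5.0):
    """Return (router_ip, info) for a usable section, or None to skip it."""
    prefix = 'CISCO_CSR_REST:'
    if not section.startswith(prefix):
        return None
    router_ip = section[len(prefix):]
    options = dict(parser.items(section))
    if not all(options.get(field) for field in REQUIRED_FIELDS):
        return None                          # missing or blank attribute
    if not (_is_ipv4(router_ip) and _is_ipv4(options['rest_mgmt'])
            and _is_ipv4(options['tunnel_ip'])):
        return None                          # malformed router/mgmt/tunnel IP
    try:
        timeout = float(options.get('timeout', default_timeout))
    except ValueError:
        return None                          # e.g. 'timeout = yes'
    return router_ip, {'rest_mgmt_ip': options['rest_mgmt'],
                       'tunnel_ip': options['tunnel_ip'],
                       'username': options['username'],
                       'password': options['password'],
                       'host': options['host'],
                       'tunnel_if': options['tunnel_if'],
                       'timeout': timeout}


# Usage sketch: collect all valid CSR entries from a config file.
#   parser = configparser.ConfigParser()
#   parser.read(['/etc/neutron/cisco_csr.ini'])
#   csrs = dict(filter(None, (parse_csr_section(parser, s)
#                             for s in parser.sections())))
# ---------------------------------------------------------------------------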
""" The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function from __future__ import division # Author: Alexandre Gramfort <[email protected]>, # Gael Varoquaux <[email protected]> # Andreas Mueller <[email protected]> # Olivier Grisel <[email protected]> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized, defaultdict from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from ..base import BaseEstimator, is_classifier, clone from ..base import MetaEstimatorMixin from ._split import check_cv from ._validation import _fit_and_score from ..exceptions import NotFittedError from ..externals.joblib import Parallel, delayed from ..externals import six from ..utils import check_random_state from ..utils.fixes import sp_version from ..utils.fixes import rankdata from ..utils.random import sample_without_replacement from ..utils.validation import indexable, check_is_fitted from ..utils.metaestimators import if_delegate_has_method from ..metrics.scorer import check_scoring __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.model_selection import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: Uses :class:`ParameterGrid` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Deterministic behavior is however guaranteed from SciPy 0.16 onwards. Read more in the :ref:`User Guide <search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.model_selection import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d. For exhaustive searches, use " "GridSearchCV." % (grid_size, self.n_iter)) for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): if sp_version < (0, 16): params[k] = v.rvs() else: params[k] = v.rvs(random_state=rnd) else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for name, v in p.items(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values for parameter ({0}) need " "to be a sequence.".format(name)) if len(v) == 0: raise ValueError("Parameter values for parameter ({0}) need " "to be a non-empty sequence.".format(name)) # XXX Remove in 0.20 class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) return self.scorer_(self.best_estimator_, X, y) def _check_is_fitted(self, method_name): if not self.refit: raise NotFittedError(('This GridSearchCV instance was initialized ' 'with refit=False. %s is ' 'available only after refitting on the best ' 'parameters. ') % method_name) else: check_is_fitted(self, 'best_estimator_') @if_delegate_has_method(delegate='estimator') def predict(self, X): """Call predict on the estimator with the best found parameters. 
Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict') return self.best_estimator_.predict(X) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_proba') return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_log_proba') return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate='estimator') def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('decision_function') return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate='estimator') def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('transform') return self.best_estimator_.transform(X) @if_delegate_has_method(delegate='estimator') def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found params. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" self._check_is_fitted('inverse_transform') return self.best_estimator_.transform(Xt) def _fit(self, X, y, labels, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = check_cv(self.cv, y, classifier=is_classifier(estimator)) self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) X, y, labels = indexable(X, y, labels) n_splits = cv.get_n_splits(X, y, labels) if self.verbose > 0 and isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv.split(X, y, labels)) test_scores, test_sample_counts, _, parameters = zip(*out) candidate_params = parameters[::n_splits] n_candidates = len(candidate_params) test_scores = np.array(test_scores, dtype=np.float64).reshape(n_candidates, n_splits) # NOTE test_sample counts (weights) remain the same for all candidates test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=np.int) # Computed the (weighted) mean and std for all the candidates weights = test_sample_counts if self.iid else None means = np.average(test_scores, axis=1, weights=weights) stds = np.sqrt(np.average((test_scores - means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results = dict() for split_i in range(n_splits): results["test_split%d_score" % split_i] = test_scores[:, split_i] results["test_mean_score"] = means results["test_std_score"] = stds ranks = np.asarray(rankdata(-means, method='min'), dtype=np.int32) best_index = np.flatnonzero(ranks == 1)[0] best_parameters = candidate_params[best_index] results["test_rank_score"] = ranks # Use one np.MaskedArray and mask all the places where the param is not # applicable for that candidate. Use defaultdict as each candidate may # not contain all the params param_results = defaultdict(partial(np.ma.masked_all, (n_candidates,), dtype=object)) for cand_i, params in enumerate(candidate_params): for name, value in params.items(): # An all masked empty array gets created for the key # `"param_%s" % name` at the first occurence of `name`. 
# Setting the value at an index also unmasks that index param_results["param_%s" % name][cand_i] = value results.update(param_results) # Store a list of param dicts at the key 'params' results['params'] = candidate_params self.results_ = results self.best_index_ = best_index self.n_splits_ = n_splits if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best_parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self @property def best_params_(self): check_is_fitted(self, 'results_') return self.results_['params'][self.best_index_] @property def best_score_(self): check_is_fitted(self, 'results_') return self.results_['test_mean_score'][self.best_index_] @property def grid_scores_(self): warnings.warn( "The grid_scores_ attribute was deprecated in version 0.18" " in favor of the more elaborate results_ attribute." " The grid_scores_ attribute will not be available from 0.20", DeprecationWarning) check_is_fitted(self, 'results_') grid_scores = list() for i, (params, mean, std) in enumerate(zip( self.results_['params'], self.results_['test_mean_score'], self.results_['test_std_score'])): scores = np.array(list(self.results_['test_split%d_score' % s][i] for s in range(self.n_splits_)), dtype=np.float64) grid_scores.append(_CVScoreTuple(params, mean, scores)) return grid_scores class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Examples -------- >>> from sklearn import svm, datasets >>> from sklearn.model_selection import GridSearchCV >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape=None, degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) >>> sorted(clf.results_.keys()) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS ['param_C', 'param_kernel', 'params', 'test_mean_score',... 'test_rank_score', 'test_split0_score', 'test_split1_score',... 'test_split2_score', 'test_std_score'] Attributes ---------- results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the below given table +------------+-----------+------------+-----------------+---+---------+ |param_kernel|param_gamma|param_degree|test_split0_score|...|...rank..| +============+===========+============+=================+===+=========+ | 'poly' | -- | 2 | 0.8 |...| 2 | +------------+-----------+------------+-----------------+---+---------+ | 'poly' | -- | 3 | 0.7 |...| 4 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.1 | -- | 0.8 |...| 3 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.2 | -- | 0.9 |...| 1 | +------------+-----------+------------+-----------------+---+---------+ will be represented by a ``results_`` dict of:: { 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], mask = [False False False False]...) 'param_gamma': masked_array(data = [-- -- 0.1 0.2], mask = [ True True False False]...), 'param_degree': masked_array(data = [2.0 3.0 -- --], mask = [False False True True]...), 'test_split0_score' : [0.8, 0.7, 0.8, 0.9], 'test_split1_score' : [0.82, 0.5, 0.7, 0.78], 'test_mean_score' : [0.81, 0.60, 0.75, 0.82], 'test_std_score' : [0.02, 0.01, 0.03, 0.03], 'test_rank_score' : [2, 4, 3, 1], 'params' : [{'kernel': 'poly', 'degree': 2}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a hyperparameter grid. :func:`sklearn.model_selection.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. 
""" def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None, labels=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. """ return self._fit(X, y, labels, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. For instance the below given table +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | test_split0_score |...|test_rank_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``results_`` dict of:: { 'param_kernel' : masked_array(data = ['rbf', rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'test_split0_score' : [0.8, 0.9, 0.7], 'test_split1_score' : [0.82, 0.5, 0.7], 'test_mean_score' : [0.81, 0.7, 0.7], 'test_std_score' : [0.02, 0.2, 0.], 'test_rank_score' : [3, 1, 1], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``results_`` arrays) which corresponds to the best candidate parameter setting. 
The dict at ``search.results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settins, constructed from param_distributions. """ def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) def fit(self, X, y=None, labels=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. """ sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state) return self._fit(X, y, labels, sampled_params)
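# ---------------------------------------------------------------------------
# Standalone usage sketch (illustration only, not part of the module above):
# how GridSearchCV and RandomizedSearchCV are typically driven.  It assumes
# this development version of scikit-learn, where per-candidate results are
# exposed through ``results_`` under keys such as 'params' and
# 'test_mean_score' (as shown in the class docstrings).
if __name__ == '__main__':
    from scipy.stats import expon
    from sklearn import datasets, svm
    from sklearn.model_selection import GridSearchCV, RandomizedSearchCV

    iris = datasets.load_iris()

    # Exhaustive search over an explicit, discrete grid.
    grid = GridSearchCV(svm.SVC(), {'kernel': ['linear', 'rbf'],
                                    'C': [1, 10]})
    grid.fit(iris.data, iris.target)
    print("best: %r (mean CV score %.3f)"
          % (grid.best_params_, grid.best_score_))
    for params, mean in zip(grid.results_['params'],
                            grid.results_['test_mean_score']):
        print("%r -> %.3f" % (params, mean))

    # Randomized search: lists are sampled uniformly, scipy distributions
    # through their rvs() method (sampling *with* replacement here).
    rnd = RandomizedSearchCV(svm.SVC(),
                             {'C': expon(scale=10), 'gamma': expon(scale=0.1)},
                             n_iter=5, random_state=0)
    rnd.fit(iris.data, iris.target)
    print("randomized best: %r" % (rnd.best_params_,))
# ---------------------------------------------------------------------------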
from datetime import timedelta import numpy as np from pandas.core.groupby import BinGrouper, Grouper from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod from pandas.tseries.index import DatetimeIndex, date_range from pandas.tseries.tdi import TimedeltaIndex from pandas.tseries.offsets import DateOffset, Tick, Day, _delta_to_nanoseconds from pandas.tseries.period import PeriodIndex, period_range import pandas.core.common as com import pandas.compat as compat from pandas.lib import Timestamp import pandas.lib as lib import pandas.tslib as tslib _DEFAULT_METHOD = 'mean' class TimeGrouper(Grouper): """ Custom groupby class for time-interval grouping Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges closed : closed end of interval; left or right label : interval boundary to use for labeling; left or right nperiods : optional, integer convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex Notes ----- Use begin, end, nperiods to generate intervals that cannot be derived directly from the associated object """ def __init__(self, freq='Min', closed=None, label=None, how='mean', nperiods=None, axis=0, fill_method=None, limit=None, loffset=None, kind=None, convention=None, base=0, **kwargs): freq = to_offset(freq) end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) rule = freq.rule_code if (rule in end_types or ('-' in rule and rule[:rule.find('-')] in end_types)): if closed is None: closed = 'right' if label is None: label = 'right' else: if closed is None: closed = 'left' if label is None: label = 'left' self.closed = closed self.label = label self.nperiods = nperiods self.kind = kind self.convention = convention or 'E' self.convention = self.convention.lower() self.loffset = loffset self.how = how self.fill_method = fill_method self.limit = limit self.base = base # always sort time groupers kwargs['sort'] = True super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs) def resample(self, obj): self._set_grouper(obj, sort=True) ax = self.grouper if isinstance(ax, DatetimeIndex): rs = self._resample_timestamps() elif isinstance(ax, PeriodIndex): offset = to_offset(self.freq) if offset.n > 1: if self.kind == 'period': # pragma: no cover print('Warning: multiple of frequency -> timestamps') # Cannot have multiple of periods, convert to timestamp self.kind = 'timestamp' if self.kind is None or self.kind == 'period': rs = self._resample_periods() else: obj = self.obj.to_timestamp(how=self.convention) self._set_grouper(obj) rs = self._resample_timestamps() elif isinstance(ax, TimedeltaIndex): rs = self._resample_timestamps(kind='timedelta') elif len(ax) == 0: return self.obj else: # pragma: no cover raise TypeError('Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex') rs_axis = rs._get_axis(self.axis) rs_axis.name = ax.name return rs def _get_grouper(self, obj): self._set_grouper(obj) return self._get_binner_for_resample() def _get_binner_for_resample(self, kind=None): # create the BinGrouper # assume that self.set_grouper(obj) has already been called ax = self.ax if kind is None: kind = self.kind if kind is None or kind == 'timestamp': self.binner, bins, binlabels = self._get_time_bins(ax) elif kind == 'timedelta': self.binner, bins, binlabels = self._get_time_delta_bins(ax) else: self.binner, bins, binlabels = self._get_time_period_bins(ax) self.grouper = BinGrouper(bins, binlabels) return self.binner, self.grouper, self.obj def _get_binner_for_grouping(self, obj): # return an ordering of the transformed 
group labels, # suitable for multi-grouping, e.g the labels for # the resampled intervals ax = self._set_grouper(obj) self._get_binner_for_resample() # create the grouper binner = self.binner l = [] for key, group in self.grouper.get_iterator(ax): l.extend([key]*len(group)) grouper = binner.__class__(l,freq=binner.freq,name=binner.name) # since we may have had to sort # may need to reorder groups here if self.indexer is not None: indexer = self.indexer.argsort(kind='quicksort') grouper = grouper.take(indexer) return grouper def _get_time_bins(self, ax): if not isinstance(ax, DatetimeIndex): raise TypeError('axis must be a DatetimeIndex, but got ' 'an instance of %r' % type(ax).__name__) if len(ax) == 0: binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels first, last = ax.min(), ax.max() first, last = _get_range_edges(first, last, self.freq, closed=self.closed, base=self.base) tz = ax.tz binner = labels = DatetimeIndex(freq=self.freq, start=first.replace(tzinfo=None), end=last.replace(tzinfo=None), tz=tz, name=ax.name) # a little hack trimmed = False if (len(binner) > 2 and binner[-2] == last and self.closed == 'right'): binner = binner[:-1] trimmed = True ax_values = ax.asi8 binner, bin_edges = self._adjust_bin_edges(binner, ax_values) # general version, knowing nothing about relative frequencies bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans) if self.closed == 'right': labels = binner if self.label == 'right': labels = labels[1:] elif not trimmed: labels = labels[:-1] else: if self.label == 'right': labels = labels[1:] elif not trimmed: labels = labels[:-1] if ax.hasnans: binner = binner.insert(0, tslib.NaT) labels = labels.insert(0, tslib.NaT) # if we end up with more labels than bins # adjust the labels # GH4076 if len(bins) < len(labels): labels = labels[:len(bins)] return binner, bins, labels def _adjust_bin_edges(self, binner, ax_values): # Some hacks for > daily data, see #1471, #1458, #1483 bin_edges = binner.asi8 if self.freq != 'D' and is_superperiod(self.freq, 'D'): day_nanos = _delta_to_nanoseconds(timedelta(1)) if self.closed == 'right': bin_edges = bin_edges + day_nanos - 1 # intraday values on last day if bin_edges[-2] > ax_values.max(): bin_edges = bin_edges[:-1] binner = binner[:-1] return binner, bin_edges def _get_time_delta_bins(self, ax): if not isinstance(ax, TimedeltaIndex): raise TypeError('axis must be a TimedeltaIndex, but got ' 'an instance of %r' % type(ax).__name__) if not len(ax): binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels labels = binner = TimedeltaIndex(start=ax[0], end=ax[-1], freq=self.freq, name=ax.name) end_stamps = labels + 1 bins = ax.searchsorted(end_stamps, side='left') return binner, bins, labels def _get_time_period_bins(self, ax): if not isinstance(ax, DatetimeIndex): raise TypeError('axis must be a DatetimeIndex, but got ' 'an instance of %r' % type(ax).__name__) if not len(ax): binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels labels = binner = PeriodIndex(start=ax[0], end=ax[-1], freq=self.freq, name=ax.name) end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp() if ax.tzinfo: end_stamps = end_stamps.tz_localize(ax.tzinfo) bins = ax.searchsorted(end_stamps, side='left') return binner, bins, labels @property def _agg_method(self): return self.how if self.how else _DEFAULT_METHOD def _resample_timestamps(self, kind=None): # assumes set_grouper(obj) already 
called axlabels = self.ax self._get_binner_for_resample(kind=kind) grouper = self.grouper binner = self.binner obj = self.obj # Determine if we're downsampling if axlabels.freq is not None or axlabels.inferred_freq is not None: if len(grouper.binlabels) < len(axlabels) or self.how is not None: # downsample grouped = obj.groupby(grouper, axis=self.axis) result = grouped.aggregate(self._agg_method) # GH2073 if self.fill_method is not None: result = result.fillna(method=self.fill_method, limit=self.limit) else: # upsampling shortcut if self.axis: raise AssertionError('axis must be 0') if self.closed == 'right': res_index = binner[1:] else: res_index = binner[:-1] # if we have the same frequency as our axis, then we are equal sampling # even if how is None if self.fill_method is None and self.limit is None and to_offset( axlabels.inferred_freq) == self.freq: result = obj.copy() result.index = res_index else: result = obj.reindex(res_index, method=self.fill_method, limit=self.limit) else: # Irregular data, have to use groupby grouped = obj.groupby(grouper, axis=self.axis) result = grouped.aggregate(self._agg_method) if self.fill_method is not None: result = result.fillna(method=self.fill_method, limit=self.limit) loffset = self.loffset if isinstance(loffset, compat.string_types): loffset = to_offset(self.loffset) if isinstance(loffset, (DateOffset, timedelta)): if (isinstance(result.index, DatetimeIndex) and len(result.index) > 0): result.index = result.index + loffset return result def _resample_periods(self): # assumes set_grouper(obj) already called axlabels = self.ax obj = self.obj if len(axlabels) == 0: new_index = PeriodIndex(data=[], freq=self.freq) return obj.reindex(new_index) else: start = axlabels[0].asfreq(self.freq, how=self.convention) end = axlabels[-1].asfreq(self.freq, how='end') new_index = period_range(start, end, freq=self.freq) # Start vs. end of period memb = axlabels.asfreq(self.freq, how=self.convention) if is_subperiod(axlabels.freq, self.freq) or self.how is not None: # Downsampling rng = np.arange(memb.values[0], memb.values[-1] + 1) bins = memb.searchsorted(rng, side='right') grouper = BinGrouper(bins, new_index) grouped = obj.groupby(grouper, axis=self.axis) return grouped.aggregate(self._agg_method) elif is_superperiod(axlabels.freq, self.freq): # Get the fill indexer indexer = memb.get_indexer(new_index, method=self.fill_method, limit=self.limit) return _take_new_index(obj, indexer, new_index, axis=self.axis) else: raise ValueError('Frequency %s cannot be resampled to %s' % (axlabels.freq, self.freq)) def _take_new_index(obj, indexer, new_index, axis=0): from pandas.core.api import Series, DataFrame if isinstance(obj, Series): new_values = com.take_1d(obj.values, indexer) return Series(new_values, index=new_index, name=obj.name) elif isinstance(obj, DataFrame): if axis == 1: raise NotImplementedError("axis 1 is not supported") return DataFrame(obj._data.reindex_indexer( new_axis=new_index, indexer=indexer, axis=1)) else: raise ValueError("'obj' should be either a Series or a DataFrame") def _get_range_edges(first, last, offset, closed='left', base=0): if isinstance(offset, compat.string_types): offset = to_offset(offset) if isinstance(offset, Tick): is_day = isinstance(offset, Day) day_nanos = _delta_to_nanoseconds(timedelta(1)) # #1165 if (is_day and day_nanos % offset.nanos == 0) or not is_day: return _adjust_dates_anchored(first, last, offset, closed=closed, base=base) if not isinstance(offset, Tick): # and first.time() != last.time(): # hack! 
first = first.normalize() last = last.normalize() if closed == 'left': first = Timestamp(offset.rollback(first)) else: first = Timestamp(first - offset) last = Timestamp(last + offset) return first, last def _adjust_dates_anchored(first, last, offset, closed='right', base=0): # from pandas.tseries.tools import normalize_date # First and last offsets should be calculated from the start day to fix an # error cause by resampling across multiple days when a one day period is # not a multiple of the frequency. # # See https://github.com/pydata/pandas/issues/8683 first_tzinfo = first.tzinfo first = first.tz_localize(None) last = last.tz_localize(None) start_day_nanos = first.normalize().value base_nanos = (base % offset.n) * offset.nanos // offset.n start_day_nanos += base_nanos foffset = (first.value - start_day_nanos) % offset.nanos loffset = (last.value - start_day_nanos) % offset.nanos if closed == 'right': if foffset > 0: # roll back fresult = first.value - foffset else: fresult = first.value - offset.nanos if loffset > 0: # roll forward lresult = last.value + (offset.nanos - loffset) else: # already the end of the road lresult = last.value else: # closed == 'left' if foffset > 0: fresult = first.value - foffset else: # start of the road fresult = first.value if loffset > 0: # roll forward lresult = last.value + (offset.nanos - loffset) else: lresult = last.value + offset.nanos # return (Timestamp(fresult, tz=first.tz), # Timestamp(lresult, tz=last.tz)) return (Timestamp(fresult).tz_localize(first_tzinfo), Timestamp(lresult).tz_localize(first_tzinfo)) def asfreq(obj, freq, method=None, how=None, normalize=False): """ Utility frequency conversion method for Series/DataFrame """ if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError("'method' argument is not supported") if how is None: how = 'E' new_index = obj.index.asfreq(freq, how=how) new_obj = obj.copy() new_obj.index = new_index return new_obj else: if len(obj.index) == 0: return obj.copy() dti = date_range(obj.index[0], obj.index[-1], freq=freq) dti.name = obj.index.name rs = obj.reindex(dti, method=method) if normalize: rs.index = rs.index.normalize() return rs
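# Hedged usage sketch (illustrative only, appended here, not part of the
# original module): the asfreq helper defined above reindexes a
# datetime-indexed object onto a new fixed-frequency index. The Series
# construction below assumes a standard pandas installation; the helper
# itself relies on this module's own date_range import.
def _example_asfreq_upsample():
    import pandas as pd
    s = pd.Series([1.0, 2.0, 3.0],
                  index=pd.date_range('2015-01-01', periods=3, freq='D'))
    # Upsample daily data to 12-hourly slots, forward-filling the new points.
    return asfreq(s, '12H', method='pad')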
import numpy as np from math import log,sqrt,e import iohelpers import sys import copy import time import subprocess import math LIKEITSCONST = 100.0 MAXVALIDITERSCONST = 1000.0 NOTIMPROVCONST = 3 IMPROVEPS = 0.1 VERBOSE = True class EmWFA: def __init__(self, n_components, n_symbols): self.n_components = n_components self.n_symbols = n_symbols def _parse_model(self, trebaoutput): trebaoutputlines = trebaoutput.split("\n") self.As = {} for symbol in range(self.n_symbols): self.As[symbol] = np.empty((self.n_components, self.n_components)) self.wordstopvec = np.zeros((self.n_components)) for line in trebaoutputlines: entries = line.split(' ') if len(entries) == 4: source_state = int(entries[0]) target_state = int(entries[1]) symbol = int(entries[2]) prob = float(entries[3]) Asym = self.As[symbol] Asym[source_state, target_state] = prob elif len(entries) == 2: source_state = int(entries[0]) stopprob = float(entries[1]) self.wordstopvec[source_state] = stopprob self.stopvec = np.mat(np.ones((self.n_components))).T self.wordstopvec = np.mat(self.wordstopvec).T self.initvec = np.mat(np.zeros((self.n_components))).T self.initvec[0] = 1 self.a = self.initvec.copy() def realfit(self, obsFile, testdata, validdata): #must use wall time since subprocess being called.. starttime = time.time() fp = open(".emtemp.fsm", "w") likeits = int(LIKEITSCONST/float(self.n_components)) numvalidits = int(MAXVALIDITERSCONST/float(self.n_components)) trebaoutput = subprocess.check_output(["treba","--train=bw","--initialize="+str(self.n_components), "--max-delta=0.5", "--restarts=5,"+str(likeits), "--max-iter=1", obsFile]) fp.write(trebaoutput) fp.flush() lastwer = 0 num_not_improv = 0 for i in range(numvalidits): trebaoutput = subprocess.check_output(["treba","--train=bw","--file=.emtemp.fsm", "--max-iter="+str(likeits), obsFile]) fp = open(".emtemp.fsm", "w") fp.write(trebaoutput) fp.flush() fp.close() self._parse_model(trebaoutput) kl = self.get_perplexity(validdata[0:1000]) wer = self.get_WER(validdata[0:1000]) if VERBOSE: print "WER: ", wer, " KL:", kl if lastwer == 0: lastwer = wer lastkl = kl bestwer = wer bestkl = kl self.bestwfa = copy.deepcopy(self) elif lastkl - kl < IMPROVEPS and lastwer - wer < IMPROVEPS: num_not_improv += 1 else: num_not_improv = 0 if lastkl > kl: lastkl = kl if lastwer > wer: lastwer = wer if kl < bestkl: bestkl = kl if wer < bestwer: bestwer = wer self.bestwfa = copy.deepcopy(self) if num_not_improv >= NOTIMPROVCONST: break self.kl = bestkl self.wer = bestwer self.buildtime = time.time()-starttime self._parse_model(trebaoutput) def fit(self, obsFile, testdata, validdata,groundtruth): #must use wall time since subprocess being called.. 
starttime = time.time() fp = open(".emtemp.fsm", "w") likeits = int(LIKEITSCONST/float(self.n_components)) numvalidits = int(MAXVALIDITERSCONST/float(self.n_components)) trebaoutput = subprocess.check_output(["treba","--train=bw","--initialize="+str(self.n_components), "--max-delta=0.5", "--restarts=5,"+str(likeits), "--max-iter=1", obsFile]) fp.write(trebaoutput) fp.flush() lastwer = 0 num_not_improv = 0 for i in range(numvalidits): trebaoutput = subprocess.check_output(["treba","--train=bw","--file=.emtemp.fsm", "--max-iter="+str(likeits), obsFile]) fp = open(".emtemp.fsm", "w") fp.write(trebaoutput) fp.flush() fp.close() self._parse_model(trebaoutput) kl = self.scorepautomac(testdata,groundtruth) wer = self.get_WER(validdata) if VERBOSE: print "WER: ", wer, " KL:", kl if lastwer == 0: lastwer = wer lastkl = kl bestwer = wer bestkl = kl self.bestwfa = copy.deepcopy(self) elif lastkl - kl < IMPROVEPS and lastwer - wer < IMPROVEPS: num_not_improv += 1 else: num_not_improv = 0 if lastkl > kl: lastkl = kl if lastwer > wer: lastwer = wer if kl < bestkl: bestkl = kl if wer < bestwer: bestwer = wer self.bestwfa = copy.deepcopy(self) if num_not_improv >= NOTIMPROVCONST: break self.kl = bestkl self.wer = bestwer self.buildtime = time.time()-starttime self._parse_model(trebaoutput) #returns average log-likelihood of sequence in the data def score(self, data): loglike = 0 for seq in data: seqloglike = 0 self.reset() for obs in seq: seqloglike = seqloglike + log(self.get_obs_prob(obs)) self.update(obs) seqloglike = seqloglike + log(self.a.T*self.wordstopvec) loglike += seqloglike return loglike/(float(len(data))) #returns the probability assigned to a word. def get_word_prob(self,seq): seqprob = 0 for obs in seq: prob = self.get_obs_prob(obs) if prob <= 0: return np.finfo(float).eps seqprob += log(prob) self.update(obs) endprob = float(self.a.T*self.wordstopvec) if endprob <= 0: return np.finfo(float).eps seqprob += log(endprob) self.reset() if not math.isnan(seqprob): return e**seqprob else: return np.finfo(float).eps def get_symbol_prediction(self): predictedsymbol = -1 maxscore = np.finfo(float).eps for symbol in range(self.n_symbols): symbolscore = self.get_obs_prob(symbol) if symbolscore > maxscore: predictedsymbol = symbol maxscore = symbolscore stopscore = float(self.a.T*self.wordstopvec) if stopscore > maxscore: predictedsymbol = self.n_symbols return predictedsymbol def get_WER(self, testdata): errors = 0 numpredictions = 0 for seq in testdata: for obs in seq: numpredictions += 1 predsymbol = self.get_symbol_prediction() self.update(obs) if predsymbol != obs: errors += 1 predsymbol = self.get_symbol_prediction() numpredictions += 1 if predsymbol != self.n_symbols: errors += 1 self.reset() return float(errors)/float(numpredictions) def get_perplexity(self, testdata): modelprobs = np.zeros((len(testdata))) probsum = 0 i = 0 for seq in testdata: prob = self.get_word_prob(seq) modelprobs[i] = prob probsum += prob i += 1 scoresum = 0 for i in range(len(modelprobs)): if modelprobs[i] < np.finfo(float).eps: modelprobs[i] = np.finfo(float).eps scoresum += log(modelprobs[i],2) scoresum /= float(len(testdata)) return 2.0**(-1.0*float(scoresum)) #updates normalized internal state def update(self, symbol): Amat = np.mat(self.As[symbol]) numerator = self.a.T*Amat denom = numerator*self.stopvec self.a = (numerator/denom).T #resets to inital state def reset(self): self.a = self.initvec.copy() #gets probability of single symbol/observation conditioned on current internal state def get_obs_prob(self, 
symbol): prob = (self.a.T)*(self.As[symbol])*self.stopvec prob = min(prob,1) prob = max(prob,np.finfo(float).eps) return prob def scorepautomac(self, testdata, truprobs): modelprobs = np.zeros((len(truprobs))) probsum = 0 i = 0 for seq in testdata: prob = self.get_word_prob(seq) modelprobs[i] = prob probsum += prob i += 1 modelprobs /= float(probsum) i = 0 scoresum = 0 for truprob in truprobs: if modelprobs[i] < np.finfo(float).eps: modelprobs[i] = np.finfo(float).eps scoresum += truprob*log(modelprobs[i],2) i += 1 return 2.0**(-1.0*float(scoresum)) def check_if_pnfa(self): negativeentries = False for Amat in self.As.values(): if not np.all(Amat > 0): negativeentries = True break residuals = np.zeros((self.hankelmat.shape[0])) for i in range(self.hankelmat.shape[0]): rowsum = 0 for Amat in self.As.values(): rowsum += np.sum(Amat[i,:]) rowsum += float(self.wordstopvec[i][0]) residuals[i] = rowsum - 1 return negativeentries, residuals if __name__ == "__main__": PAUTOMACPATH = "/home/williamleif/Dropbox/icml2014-experiments/datasets/PAutomaC-competition_sets/" RESULTS_DIR = "/home/williamleif/Dropbox/icml2014-experiments/results/em/" problem = sys.argv[1] n_symbols = sys.argv[2] n_symbols = int(n_symbols) if problem != "tree" and problem != "timeseries": testdata = iohelpers.parse_file(PAUTOMACPATH+problem+".pautomac.test") validdata = iohelpers.parse_file(PAUTOMACPATH+problem+".pautomac.train")[15000:20000] iohelpers.clean_pautomacfile_for_em(PAUTOMACPATH+problem+".pautomac.train", PAUTOMACPATH+problem+".pautomac.em") groundtruth = iohelpers.parse_groundtruth_file(PAUTOMACPATH+problem+".pautomac_solution.txt") avruntime = 0 nummodelsmade = 0 klsize = 0 sizes = [5] sizes.extend(range(10,41,10)) for i in sizes: wfa = EmWFA(i, n_symbols) wfa.fit(PAUTOMACPATH+problem+".pautomac.em",testdata,validdata, groundtruth) kl = wfa.kl wer = wfa.bestwfa.get_WER(testdata) if i == 30: thirtykl = wfa.kl thirtywer = wfa.wer if klsize == 0: bestkl = kl bestwer = wer klsize = i wersize = i else: if kl < bestkl: bestkl = kl klsize = i if wer < bestwer: bestwer = wer wersize = i avruntime += wfa.buildtime nummodelsmade += 1 print "Model size: ", i, " KL: ", kl, " WER: ", wer iohelpers.write_results(RESULTS_DIR+"em-pautomac="+problem+".txt", problem, "KL size:"+str(klsize)+" WER size: "+str(wersize)+" 30 KL: "+str(thirtykl)+" 30 WER: "+str(thirtywer), "KL, WER", str(bestkl)+","+str(bestwer), avruntime/float(nummodelsmade)) else: if problem == "tree": traindata = iohelpers.parse_file("/home/williamleif/Dropbox/icml2014-experiments/datasets/treebankdata.obs") validdata = traindata[0:5000] testdata = traindata[5000:10000] traindata = traindata[10000:len(traindata)] fp = open("treetemp.obs", "w") fp.write("\n".join([" ".join([str(j) for j in i]) for i in traindata])) fp.close() avruntime = 0 nummodelsmade = 0 klsize = 0 sizes = [5] sizes.extend(range(10,41,10)) for i in sizes: wfa = EmWFA(i, n_symbols) wfa.realfit("treetemp.obs",testdata,validdata) kl = wfa.kl wer = wfa.bestwfa.get_WER(testdata) if i == 30: thirtykl = wfa.kl thirtywer = wfa.wer if klsize == 0: bestkl = kl bestwer = wer klsize = i wersize = i else: if kl < bestkl: bestkl = kl klsize = i if wer < bestwer: bestwer = wer wersize = i avruntime += wfa.buildtime nummodelsmade += 1 print "Model size: ", i, " KL: ", kl, " WER: ", wer iohelpers.write_results(RESULTS_DIR+"em-pautomac="+problem+".txt", problem, "KL size:"+str(klsize)+" WER size: "+str(wersize)+" 30 KL: "+str(thirtykl), "KL, WER", str(bestkl)+","+str(bestwer), avruntime/float(nummodelsmade))
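# Illustrative sketch (hypothetical helper appended here, not part of the
# original script): feed a hand-written two-state, one-symbol automaton
# description into EmWFA._parse_model using the format that method reads
# above (transition lines of "source target symbol prob" and stop lines of
# "state stopprob"), then score a short sequence with the resulting model.
def _example_parse_and_score():
    wfa = EmWFA(n_components=2, n_symbols=1)
    wfa._parse_model("0 0 0 0.4\n0 1 0 0.5\n"
                     "1 0 0 0.3\n1 1 0 0.2\n"
                     "0 0.1\n1 0.5")
    # Probability the model assigns to the word consisting of symbol 0 twice.
    return wfa.get_word_prob([0, 0])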
from __future__ import with_statement """ Fabcommon is a reusable deployment script in fabric Copyright (c) 2015, Miguel Marcos. License: MIT (see LICENSE for details) """ __author__ = 'Miguel Marcos' __version__ = '0.9.2' __license__ = 'MIT' import os import re from fabric.api import local, settings, abort, env, run, cd, prefix from fabric.contrib.console import confirm # default settings env.repository_type = 'git' # can be 'release' - new virtualenv for every release, 'project' - one #virtualenv for all releases or None - no virtualenv. env.venv_scope = 'project' def sort_versions(versions, reverse=False): # small hack to ensure final versions come after pre-releases versions = [version + 'z' for version in versions] convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] versions = sorted(versions, key=alphanum_key) # remove hack after sorting versions = [version[:-1] for version in versions] if reverse: versions.reverse() return versions def verify_or_increase_version(version_pre_release, message): ''' If version is a recognized keyword it increments the version number by looking into the tag list, picking up the highest version that conforms to the semver.org spec, and increasing the correct level according to the pattern: major.minor.patch[-alpha.n|beta.n|rc.n] Possible keywords are: major|minor|patch|release[-]alpha|beta|rc alpha, beta and rc versions can only be created together with a new version or on top of an existing pre-release, so major-beta is possible but beta is only possible if an alpha pre-release or a beta pre-release already exists on the latest version. release is not possible to combine with alpha, beta or rc keywords. This keyword will remove the pre-release label and will keep the version number. If version is not a keyword and it exists in the tag list, then version is returned without any changes. 
If version is not a keyword and is not in the tag list, an Exception is raised ''' # get all tags into a list tags = local('git tag -l "*.*.*"', capture=True).split() # if it's an existing version, then just return it if version_pre_release in tags: return version_pre_release # otherwise it's assumed to be a keyword to increase the version number version_pre_release = version_pre_release.split('-') if len(version_pre_release) == 2 and \ version_pre_release[0] in ('major', 'minor', 'patch') and \ version_pre_release[1] in ('alpha', 'beta', 'rc'): version, pre_release = version_pre_release elif len(version_pre_release) == 1: if version_pre_release[0] in ('alpha', 'beta', 'rc'): version = '' pre_release = version_pre_release[0] elif version_pre_release[0] in ('major', 'minor', 'patch', 'release'): version = version_pre_release[0] pre_release = '' else: raise Exception('Version does not exist or invalid keyword') else: raise Exception('Version does not exist or invalid keyword') if not tags: tags = ['0.0.0'] # do a natural sort with descending order tags = sort_versions(tags, reverse=True) # look for the first semver.org compatible version version_pattern = re.compile('(\d+)\.(\d+)\.(\d+)(-[^\+]+)?(\+.+)?') for tag in tags: version_match = version_pattern.match(tag) new_version = [] new_pre_release = [] if version_match: # create array with major, minor and patch numbers if version == 'major': new_version = [str(int(version_match.group(1)) + 1), '0', '0'] elif version == 'minor': new_version = [version_match.group(1), str(int(version_match.group(2)) + 1), '0'] elif version == 'patch': new_version = [version_match.group(1), version_match.group(2), str(int(version_match.group(3)) + 1)] elif version == 'release' and not version_match.group(4): raise Exception('There is already a final release, use ' +\ 'major|minor|patch[-]alpha|beta|rc instead') else: new_version = [(version_match.group(1)), (version_match.group(2)), (version_match.group(3))] # pre_release is optional, it is only possible to create a new # pre-release if a final version does not exist yet (one without a # pre-release appended), an alpha version cannot be # created/increased if a beta or rc version exists and a beta # version cannot be created/increased if an rc version exists. if pre_release: previous_pre_release = version_match.group(4) if version: new_pre_release = [pre_release, '1'] elif previous_pre_release: previous_pre_release = previous_pre_release[1:].split('.') if previous_pre_release[0] == pre_release: new_pre_release = [pre_release, \ str(int(previous_pre_release[1]) + 1)] elif previous_pre_release[0] == 'alpha' and \ pre_release in ('beta', 'rc'): new_pre_release = [pre_release, '1'] elif previous_pre_release[0] in ('alpha', 'beta') and \ pre_release == 'rc': new_pre_release = [pre_release, '1'] else: raise Exception('Unable to increase pre-release') increased_version = ('.'.join(new_version) + '-' + '.'.\ join(new_pre_release)).rstrip('-') local('git tag -a {0} -m \'{1}\''.format(increased_version, message)) local('git push --quiet --tags') return increased_version return None def deploy(version, message='', update_cron=False): releases_path = os.path.join(env.base_path, 'releases') virtualenv_python_path = '-p ' + env.python_path if env.python_path else '' # if no specific version number is specified we will increment the current # one based on how big the changes are. 
version = verify_or_increase_version(version, message) # create a releases directory if one doesn't exist run('mkdir -p ' + releases_path) with cd(releases_path): # export the source code to the build directory if not already there run('if [ ! -d ' + version + ' ]; then ' + \ 'git clone ' + env.repository + ' ' + version +\ ' && cd ' + version + ' && git checkout -b tags/' + version +\ ' && rm -rf .git; fi') if env.venv_scope == 'release': # setup virtualenv and install dependencies, if not already there run('cd ' + version + ' && if [ ! -d venv ]; then ' +\ 'virtualenv ' + virtualenv_python_path + ' venv ' +\ '&& source venv/bin/activate ' +\ '&& pip install -qr src/requirements.txt; fi') if env.venv_scope == 'release': # add a symlink to venv run('rm -rf ' + os.path.join(env.base_path, 'venv')) run('ln -sfn ' + os.path.join(releases_path, version, 'venv') + ' ' +\ os.path.join(env.base_path, 'venv')) elif env.venv_scope == 'project': with cd(env.base_path): run('if [ ! -d venv ]; then ' +\ 'rm -rf venv ' +\ '&& virtualenv ' + virtualenv_python_path + ' venv; fi ' +\ '&& source venv/bin/activate ' +\ '&& pip install -qr ' + os.path.join(releases_path, version, \ 'src/requirements.txt')) # create a logs and media dirs if they do not exist run('mkdir -p ' + os.path.join(env.base_path, 'logs')) run('mkdir -p ' + os.path.join(env.base_path, 'media')) # add a symlink to the media folder run('rm -rf ' + os.path.join(releases_path, version, 'media') +\ ' && ln -s '+ os.path.join(env.base_path, 'media') + ' ' +\ os.path.join(releases_path, version, 'media')) if env.pre_activate_task: env.pre_activate_task(releases_path, version) # update the crontab if crontab.txt exists and update_cron is True # the crontab can have a template variable {{ project_dir }} that will # be replace with the current if update_cron: with cd(os.path.join(releases_path, version)): run('if [ -f crontab.txt ]; then ' + \ 'crontab -r && cat crontab.txt | ' + \ 'sed -e \'s,{{ project_dir }},\'$PWD\',\' | crontab; fi') # activate the build run('ln -sfn ' + os.path.join(releases_path, version, 'src') + ' ' +\ os.path.join(env.base_path, 'src')) # only keep the 10 most recent releases with cd(releases_path): run('ls -t | sed \'s,\\(.*\\),"\\1",\' | tail -n+11 | xargs rm -rf')
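# Hedged usage sketch (illustrative helper appended here, not part of
# fabcommon): the natural sort implemented by sort_versions above orders
# pre-releases before their final release and compares numeric components
# as numbers rather than strings.
def _example_sort_versions():
    ordered = sort_versions(['1.10.0', '1.2.0', '1.2.0-beta.2', '1.2.0-rc.1'])
    assert ordered == ['1.2.0-beta.2', '1.2.0-rc.1', '1.2.0', '1.10.0']
    return ordered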
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import multiprocessing import threading from builtins import next, object from multiprocessing.pool import ThreadPool from future.moves import _thread from pants.reporting.report import Report class Work(object): """Represents multiple concurrent calls to the same callable.""" def __init__(self, func, args_tuples, workunit_name=None): # A callable. self.func = func # A list of tuples of args. func will be called once per tuple, concurrently. # The length of this list is the cardinality of the work. self.args_tuples = args_tuples # If specified, each invocation will be executed in a workunit of this name. self.workunit_name = workunit_name class WorkerPool(object): """A pool of workers. Workers are threads, and so are subject to GIL constraints. Submitting CPU-bound work may not be effective. Use this class primarily for IO-bound work. """ def __init__(self, parent_workunit, run_tracker, num_workers): self._run_tracker = run_tracker # All workers accrue work to the same root. self._pool = ThreadPool(processes=num_workers, initializer=self._run_tracker.register_thread, initargs=(parent_workunit, )) # We mustn't shutdown when there are pending workchains, as they may need to submit work # in the future, and the pool doesn't know about this yet. self._pending_workchains = 0 self._pending_workchains_cond = threading.Condition() # Protects self._pending_workchains. self._shutdown_hooks = [] self.num_workers = num_workers def add_shutdown_hook(self, hook): self._shutdown_hooks.append(hook) def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None): """Submit work to be executed in the background. :param work: The work to execute. :param workunit_parent: If specified, work is accounted for under this workunit. :param on_success: If specified, a callable taking a single argument, which will be a list of return values of each invocation, in order. Called only if all work succeeded. :param on_failure: If specified, a callable taking a single argument, which is an exception thrown in the work. :return: `multiprocessing.pool.MapResult` Don't do work in on_success: not only will it block the result handling thread, but that thread is not a worker and doesn't have a logging context etc. Use it just to submit further work to the pool. """ if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables. if on_success: on_success([]) else: def do_work(*args): self._do_work(work.func, *args, workunit_name=work.workunit_name, workunit_parent=workunit_parent, on_failure=on_failure) return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success) def submit_async_work_chain(self, work_chain, workunit_parent, done_hook=None): """Submit work to be executed in the background. - work_chain: An iterable of Work instances. Will be invoked serially. Each instance may have a different cardinality. There is no output-input chaining: the argument tuples must already be present in each work instance. If any work throws an exception no subsequent work in the chain will be attempted. - workunit_parent: Work is accounted for under this workunit. - done_hook: If not None, invoked with no args after all work is done, or on error. 
""" def done(): if done_hook: done_hook() with self._pending_workchains_cond: self._pending_workchains -= 1 self._pending_workchains_cond.notify() def error(e): done() self._run_tracker.log(Report.ERROR, '{}'.format(e)) # We filter out Nones defensively. There shouldn't be any, but if a bug causes one, # Pants might hang indefinitely without this filtering. work_iter = (_f for _f in work_chain if _f) def submit_next(): try: self.submit_async_work(next(work_iter), workunit_parent=workunit_parent, on_success=lambda x: submit_next(), on_failure=error) except StopIteration: done() # The success case. with self._pending_workchains_cond: self._pending_workchains += 1 try: submit_next() except Exception as e: # Handles errors in the submission code. done() self._run_tracker.log(Report.ERROR, '{}'.format(e)) raise def submit_work_and_wait(self, work, workunit_parent=None): """Submit work to be executed on this pool, but wait for it to complete. - work: The work to execute. - workunit_parent: If specified, work is accounted for under this workunit. Returns a list of return values of each invocation, in order. Throws if any invocation does. """ if work is None or len(work.args_tuples) == 0: # map hangs on 0-length iterables. return [] else: def do_work(*args): return self._do_work(work.func, *args, workunit_name=work.workunit_name, workunit_parent=workunit_parent) # We need to specify a timeout explicitly, because otherwise python ignores SIGINT when waiting # on a condition variable, so we won't be able to ctrl-c out. return self._pool.map_async(do_work, work.args_tuples, chunksize=1).get(timeout=1000000000) def _do_work(self, func, args_tuple, workunit_name, workunit_parent, on_failure=None): try: if workunit_name: with self._run_tracker.new_workunit_under_parent(name=workunit_name, parent=workunit_parent): return func(*args_tuple) else: return func(*args_tuple) except KeyboardInterrupt: # If a worker thread intercepts a KeyboardInterrupt, we want to propagate it to the main # thread. _thread.interrupt_main() raise except Exception as e: if on_failure: # Note that here the work's workunit is closed. So, e.g., it's OK to use on_failure() # to close an ancestor workunit. on_failure(e) raise def shutdown(self): with self._pending_workchains_cond: while self._pending_workchains > 0: self._pending_workchains_cond.wait() self._pool.close() self._pool.join() for hook in self._shutdown_hooks: hook() def abort(self): self._pool.terminate() class SubprocPool(object): """Singleton for managing multiprocessing.Pool instances Subprocesses (including multiprocessing.Pool workers) can inherit locks in poorly written libraries (eg zlib) if other threads in the parent process happen to be holding them at the moment the worker is fork()'ed. Thus it is important to create any subprocesses BEFORE starting any threads, or they may deadlock mysteriously when sent a particular piece of work. This is accomplished in pants by these initializing pools early, when creating the RunTracker. However, in tests, RunTrackers are created repeatedly, as part of creating Contexts that are used briefly and discarded. Creating a new subprocess pool every time is expensive, and will lead to os.fork failing once too many processes are spawned. To avoid this, the pools themselves are kept in this singleton and new RunTrackers re-use them. 
""" _pool = None _lock = threading.Lock() _num_processes = multiprocessing.cpu_count() @classmethod def set_num_processes(cls, num_processes): cls._num_processes = num_processes @classmethod def foreground(cls): with cls._lock: if cls._pool is None: cls._pool = ThreadPool(processes=cls._num_processes) return cls._pool @classmethod def shutdown(cls, force): with cls._lock: old = cls._pool cls._pool = None if old: if force: old.terminate() else: old.close() old.join()
from __future__ import print_function import argparse import gzip import os import shutil import struct import tarfile import tempfile import h5py import numpy import six from numpy.testing import assert_equal, assert_raises from six.moves import range, zip, cPickle from fuel.converters.base import (fill_hdf5_file, check_exists, MissingInputFiles) from fuel.converters import binarized_mnist, cifar10, mnist, cifar100 if six.PY3: getbuffer = memoryview else: getbuffer = numpy.getbuffer class TestFillHDF5File(object): def setUp(self): self.h5file = h5py.File( 'file.hdf5', mode='w', driver='core', backing_store=False) self.train_features = numpy.arange( 16, dtype='uint8').reshape((4, 2, 2)) self.test_features = numpy.arange( 8, dtype='uint8').reshape((2, 2, 2)) + 3 self.train_targets = numpy.arange( 4, dtype='float32').reshape((4, 1)) self.test_targets = numpy.arange( 2, dtype='float32').reshape((2, 1)) + 3 def tearDown(self): self.h5file.close() def test_data(self): fill_hdf5_file( self.h5file, (('train', 'features', self.train_features, '.'), ('train', 'targets', self.train_targets), ('test', 'features', self.test_features), ('test', 'targets', self.test_targets))) assert_equal(self.h5file['features'], numpy.vstack([self.train_features, self.test_features])) assert_equal(self.h5file['targets'], numpy.vstack([self.train_targets, self.test_targets])) def test_dtype(self): fill_hdf5_file( self.h5file, (('train', 'features', self.train_features), ('train', 'targets', self.train_targets), ('test', 'features', self.test_features), ('test', 'targets', self.test_targets))) assert_equal(str(self.h5file['features'].dtype), 'uint8') assert_equal(str(self.h5file['targets'].dtype), 'float32') def test_multiple_length_error(self): train_targets = numpy.arange(8, dtype='float32').reshape((8, 1)) assert_raises(ValueError, fill_hdf5_file, self.h5file, (('train', 'features', self.train_features), ('train', 'targets', train_targets))) def test_multiple_dtype_error(self): test_features = numpy.arange( 8, dtype='float32').reshape((2, 2, 2)) + 3 assert_raises( ValueError, fill_hdf5_file, self.h5file, (('train', 'features', self.train_features), ('test', 'features', test_features))) def test_multiple_shape_error(self): test_features = numpy.arange( 16, dtype='uint8').reshape((2, 4, 2)) + 3 assert_raises( ValueError, fill_hdf5_file, self.h5file, (('train', 'features', self.train_features), ('test', 'features', test_features))) class TestMNIST(object): def setUp(self): MNIST_IMAGE_MAGIC = 2051 MNIST_LABEL_MAGIC = 2049 numpy.random.seed(9 + 5 + 2015) self.train_features_mock = numpy.random.randint( 0, 256, (10, 1, 28, 28)).astype('uint8') self.train_targets_mock = numpy.random.randint( 0, 10, (10, 1)).astype('uint8') self.test_features_mock = numpy.random.randint( 0, 256, (10, 1, 28, 28)).astype('uint8') self.test_targets_mock = numpy.random.randint( 0, 10, (10, 1)).astype('uint8') self.tempdir = tempfile.mkdtemp() self.train_images_path = os.path.join( self.tempdir, 'train-images-idx3-ubyte.gz') self.train_labels_path = os.path.join( self.tempdir, 'train-labels-idx1-ubyte.gz') self.test_images_path = os.path.join( self.tempdir, 't10k-images-idx3-ubyte.gz') self.test_labels_path = os.path.join( self.tempdir, 't10k-labels-idx1-ubyte.gz') self.wrong_images_path = os.path.join(self.tempdir, 'wrong_images.gz') self.wrong_labels_path = os.path.join(self.tempdir, 'wrong_labels.gz') with gzip.open(self.train_images_path, 'wb') as f: f.write(struct.pack('>iiii', *(MNIST_IMAGE_MAGIC, 10, 28, 28))) 
f.write(getbuffer(self.train_features_mock.flatten())) with gzip.open(self.train_labels_path, 'wb') as f: f.write(struct.pack('>ii', *(MNIST_LABEL_MAGIC, 10))) f.write(getbuffer(self.train_targets_mock.flatten())) with gzip.open(self.test_images_path, 'wb') as f: f.write(struct.pack('>iiii', *(MNIST_IMAGE_MAGIC, 10, 28, 28))) f.write(getbuffer(self.test_features_mock.flatten())) with gzip.open(self.test_labels_path, 'wb') as f: f.write(struct.pack('>ii', *(MNIST_LABEL_MAGIC, 10))) f.write(getbuffer(self.test_targets_mock.flatten())) with gzip.open(self.wrong_images_path, 'wb') as f: f.write(struct.pack('>iiii', *(2000, 10, 28, 28))) with gzip.open(self.wrong_labels_path, 'wb') as f: f.write(struct.pack('>ii', *(2000, 10))) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_mnist.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('mnist') subparser.set_defaults( directory=self.tempdir, output_file=filename) mnist.fill_subparser(subparser) args = parser.parse_args(['mnist']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal( h5file['features'][...], numpy.vstack( [self.train_features_mock, self.test_features_mock])) assert_equal( h5file['targets'][...], numpy.vstack([self.train_targets_mock, self.test_targets_mock])) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(str(h5file['targets'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 'width')) assert_equal(tuple(dim.label for dim in h5file['targets'].dims), ('batch', 'index')) def test_wrong_image_magic(self): assert_raises( ValueError, mnist.read_mnist_images, self.wrong_images_path) def test_wrong_label_magic(self): assert_raises( ValueError, mnist.read_mnist_labels, self.wrong_labels_path) def test_read_image_bool(self): assert_equal(mnist.read_mnist_images(self.train_images_path, 'bool'), self.train_features_mock >= 128) def test_read_image_float(self): rval = mnist.read_mnist_images(self.train_images_path, 'float32') assert_equal(rval, self.train_features_mock.astype('float32') / 255.) 
assert_equal(str(rval.dtype), 'float32') def test_read_image_value_error(self): assert_raises(ValueError, mnist.read_mnist_images, self.train_images_path, 'int32') class TestBinarizedMNIST(object): def setUp(self): numpy.random.seed(9 + 5 + 2015) self.train_mock = numpy.random.randint(0, 2, (5, 784)) self.valid_mock = numpy.random.randint(0, 2, (5, 784)) self.test_mock = numpy.random.randint(0, 2, (5, 784)) self.tempdir = tempfile.mkdtemp() numpy.savetxt( os.path.join(self.tempdir, 'binarized_mnist_train.amat'), self.train_mock) numpy.savetxt( os.path.join(self.tempdir, 'binarized_mnist_valid.amat'), self.valid_mock) numpy.savetxt( os.path.join(self.tempdir, 'binarized_mnist_test.amat'), self.test_mock) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_binarized_mnist.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('binarized_mnist') subparser.set_defaults(directory=self.tempdir, output_file=filename) binarized_mnist.fill_subparser(subparser) args = parser.parse_args(['binarized_mnist']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal(h5file['features'][...], numpy.vstack([self.train_mock, self.valid_mock, self.test_mock]).reshape((-1, 1, 28, 28))) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 'width')) class TestCIFAR10(object): def setUp(self): numpy.random.seed(9 + 5 + 2015) self.train_features_mock = [ numpy.random.randint(0, 256, (10, 3, 32, 32)).astype('uint8') for i in range(5)] self.train_targets_mock = [ numpy.random.randint(0, 10, (10,)).astype('uint8') for i in range(5)] self.test_features_mock = numpy.random.randint( 0, 256, (10, 3, 32, 32)).astype('uint8') self.test_targets_mock = numpy.random.randint( 0, 10, (10,)).astype('uint8') self.tempdir = tempfile.mkdtemp() cwd = os.getcwd() os.chdir(self.tempdir) os.mkdir('cifar-10-batches-py') for i, (x, y) in enumerate(zip(self.train_features_mock, self.train_targets_mock)): filename = os.path.join( 'cifar-10-batches-py', 'data_batch_{}'.format(i + 1)) with open(filename, 'wb') as f: cPickle.dump({'data': x, 'labels': y}, f) filename = os.path.join('cifar-10-batches-py', 'test_batch') with open(filename, 'wb') as f: cPickle.dump({'data': self.test_features_mock, 'labels': self.test_targets_mock}, f) with tarfile.open('cifar-10-python.tar.gz', 'w:gz') as tar_file: tar_file.add('cifar-10-batches-py') os.chdir(cwd) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_cifar10.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('cifar10') subparser.set_defaults(directory=self.tempdir, output_file=filename) cifar10.fill_subparser(subparser) args = parser.parse_args(['cifar10']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal( h5file['features'][...], numpy.vstack( self.train_features_mock + [self.test_features_mock])) assert_equal( h5file['targets'][...], numpy.hstack(self.train_targets_mock + [self.test_targets_mock]).reshape((-1, 1))) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(str(h5file['targets'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 
'width')) assert_equal(tuple(dim.label for dim in h5file['targets'].dims), ('batch', 'index')) class TestCIFAR100(object): def setUp(self): numpy.random.seed(9 + 5 + 2015) self.train_features_mock = numpy.random.randint( 0, 256, (10, 3, 32, 32)).astype('uint8') self.train_fine_labels_mock = numpy.random.randint( 0, 100, (10,)).astype('uint8') self.train_coarse_labels_mock = numpy.random.randint( 0, 20, (10,)).astype('uint8') self.test_features_mock = numpy.random.randint( 0, 256, (10, 3, 32, 32)).astype('uint8') self.test_fine_labels_mock = numpy.random.randint( 0, 100, (10,)).astype('uint8') self.test_coarse_labels_mock = numpy.random.randint( 0, 20, (10,)).astype('uint8') self.tempdir = tempfile.mkdtemp() cwd = os.getcwd() os.chdir(self.tempdir) os.mkdir('cifar-100-python') filename = os.path.join('cifar-100-python', 'train') with open(filename, 'wb') as f: cPickle.dump({'data': self.train_features_mock.reshape((10, -1)), 'fine_labels': self.train_fine_labels_mock, 'coarse_labels': self.train_coarse_labels_mock}, f) filename = os.path.join('cifar-100-python', 'test') with open(filename, 'wb') as f: cPickle.dump({'data': self.test_features_mock.reshape((10, -1)), 'fine_labels': self.test_fine_labels_mock, 'coarse_labels': self.test_coarse_labels_mock}, f) with tarfile.open('cifar-100-python.tar.gz', 'w:gz') as tar_file: tar_file.add('cifar-100-python') os.chdir(cwd) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_cifar100.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('cifar100') subparser.set_defaults(directory=self.tempdir, output_file=filename) cifar100.fill_subparser(subparser) args = parser.parse_args(['cifar100']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal( h5file['features'][...], numpy.vstack([self.train_features_mock, self.test_features_mock])) assert_equal( h5file['fine_labels'][...], numpy.hstack([self.train_fine_labels_mock, self.test_fine_labels_mock]).reshape((-1, 1))) assert_equal( h5file['coarse_labels'][...], numpy.hstack([self.train_coarse_labels_mock, self.test_coarse_labels_mock]).reshape((-1, 1))) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(str(h5file['fine_labels'].dtype), 'uint8') assert_equal(str(h5file['coarse_labels'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 'width')) assert_equal(tuple(dim.label for dim in h5file['fine_labels'].dims), ('batch', 'index')) assert_equal(tuple(dim.label for dim in h5file['coarse_labels'].dims), ('batch', 'index')) def test_check_exists(): try: directory = tempfile.mkdtemp() with open(os.path.join(directory, 'abcdef.txt'), 'w') as f: print('\n', file=f) @check_exists(required_files=['abcdef.txt']) def foo(directory, a=None, b=None): pass try: foo(directory) except MissingInputFiles: assert False, "MissingInputFiles raised when files present" @check_exists(required_files=['ghijkl.txt']) def bar(directory, c=None, d=None): pass assert_raises(MissingInputFiles, bar, directory) @check_exists(required_files=['abcdef.txt', 'ghijkl.txt']) def baz(directory, x, y=None): pass assert_raises(MissingInputFiles, baz, directory, 9) try: baz(directory, 9) except MissingInputFiles as e: assert e.filenames == ['ghijkl.txt'] with open(os.path.join(directory, 'ghijkl.txt'), 'w') as f: print('\n\n', file=f) try: bar(directory) baz(directory, 44) 
        except MissingInputFiles:
            assert False, "MissingInputFiles raised when files present"
    finally:
        os.remove(os.path.join(directory, 'abcdef.txt'))
        os.remove(os.path.join(directory, 'ghijkl.txt'))
        os.rmdir(directory)
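# Hedged sketch (hypothetical helper appended here, not part of the test
# suite): mirrors TestFillHDF5File above by writing two splits of a single
# 'features' source into an in-memory HDF5 file with fill_hdf5_file and
# reading them back as one concatenated dataset.
def _example_fill_hdf5_file():
    h5file = h5py.File('example.hdf5', mode='w', driver='core',
                       backing_store=False)
    train = numpy.arange(8, dtype='uint8').reshape((4, 2))
    test = numpy.arange(4, dtype='uint8').reshape((2, 2))
    fill_hdf5_file(h5file, (('train', 'features', train),
                            ('test', 'features', test)))
    features = h5file['features'][...]
    h5file.close()
    return features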
#!/usr/bin/env python # Copyright (c) 2013, 2014 Intel Corporation. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=F0401 import optparse import os import re import shutil import subprocess import sys from app_info import AppInfo from customize import VerifyPackageName, CustomizeAll, \ ParseParameterForCompressor from handle_permissions import permission_mapping_table from manifest_json_parser import HandlePermissionList from manifest_json_parser import ManifestJsonParser NATIVE_LIBRARY = 'libxwalkcore.so' def CleanDir(path): if os.path.exists(path): shutil.rmtree(path) def AllArchitectures(): return ("x86", "arm") def ConvertArchNameToArchFolder(arch): arch_dict = { 'x86': 'x86', 'arm': 'armeabi-v7a' } return arch_dict.get(arch, None) def AddExeExtensions(name): exts_str = os.environ.get('PATHEXT', '').lower() exts = [_f for _f in exts_str.split(os.pathsep) if _f] result = [] result.append(name) for e in exts: result.append(name + e) return result def RunCommand(command, verbose=False, shell=False): """Runs the command list, print the output, and propagate its result.""" proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell) if not shell: output = proc.communicate()[0] result = proc.returncode if verbose: print(output.decode("utf-8").strip()) if result != 0: print ('Command "%s" exited with non-zero exit code %d' % (' '.join(command), result)) sys.exit(result) return output.decode("utf-8") def Which(name): """Searches PATH for executable files with the given name, also taking PATHEXT into account. Returns the first existing match, or None if no matches are found.""" for path in os.environ.get('PATH', '').split(os.pathsep): for filename in AddExeExtensions(name): full_path = os.path.join(path, filename) if os.path.isfile(full_path) and os.access(full_path, os.X_OK): return full_path return None def GetAndroidApiLevel(): """Get Highest Android target level installed. return -1 if no targets have been found. 
""" target_output = RunCommand(['android', 'list', 'target', '-c']) target_regex = re.compile(r'android-(\d+)') targets = [int(i) for i in target_regex.findall(target_output)] targets.extend([-1]) return max(targets) def GetVersion(path): """Get the version of this python tool.""" version_str = 'Crosswalk app packaging tool version is ' file_handle = open(path, 'r') src_content = file_handle.read() version_nums = re.findall(r'\d+', src_content) version_str += ('.').join(version_nums) file_handle.close() return version_str def ContainsNativeLibrary(path): return os.path.isfile(os.path.join(path, NATIVE_LIBRARY)) def ParseManifest(options): parser = ManifestJsonParser(os.path.expanduser(options.manifest)) if not options.name: options.name = parser.GetAppName() if not options.app_version: options.app_version = parser.GetVersion() if not options.app_versionCode and not options.app_versionCodeBase: options.app_versionCode = 1 if parser.GetDescription(): options.description = parser.GetDescription() if parser.GetPermissions(): options.permissions = parser.GetPermissions() if parser.GetAppUrl(): options.app_url = parser.GetAppUrl() elif parser.GetAppLocalPath(): options.app_local_path = parser.GetAppLocalPath() else: print('Error: there is no app launch path defined in manifest.json.') sys.exit(9) if parser.GetAppRoot(): options.app_root = parser.GetAppRoot() options.icon_dict = parser.GetIcons() if parser.GetOrientation(): options.orientation = parser.GetOrientation() if parser.GetFullScreenFlag().lower() == 'true': options.fullscreen = True elif parser.GetFullScreenFlag().lower() == 'false': options.fullscreen = False return parser def ParseXPK(options, out_dir): cmd = ['python', 'parse_xpk.py', '--file=%s' % os.path.expanduser(options.xpk), '--out=%s' % out_dir] RunCommand(cmd) if options.manifest: print ('Use the manifest from XPK by default ' 'when "--xpk" option is specified, and ' 'the "--manifest" option would be ignored.') sys.exit(7) if os.path.isfile(os.path.join(out_dir, 'manifest.json')): options.manifest = os.path.join(out_dir, 'manifest.json') else: print('XPK doesn\'t contain manifest file.') sys.exit(8) def FindExtensionJars(root_path): ''' Find all .jar files for external extensions. 
''' extension_jars = [] if not os.path.exists(root_path): return extension_jars for afile in os.listdir(root_path): if os.path.isdir(os.path.join(root_path, afile)): base_name = os.path.basename(afile) extension_jar = os.path.join(root_path, afile, base_name + '.jar') if os.path.isfile(extension_jar): extension_jars.append(extension_jar) return extension_jars # Follows the recommendation from # http://software.intel.com/en-us/blogs/2012/11/12/how-to-publish- # your-apps-on-google-play-for-x86-based-android-devices-using def MakeVersionCode(options): ''' Construct a version code''' if options.app_versionCode: return options.app_versionCode # First digit is ABI, ARM=2, x86=6 abi = '0' if options.arch == 'arm': abi = '2' if options.arch == 'x86': abi = '6' b = '0' if options.app_versionCodeBase: b = str(options.app_versionCodeBase) if len(b) > 7: print('Version code base must be 7 digits or less: ' 'versionCodeBase=%s' % (b)) sys.exit(12) # zero pad to 7 digits, middle digits can be used for other # features, according to recommendation in URL return '%s%s' % (abi, b.zfill(7)) def Customize(options, app_info, manifest): app_info.package = options.package app_info.app_name = options.name # 'org.xwalk.my_first_app' => 'MyFirstApp' android_name = options.package.split('.')[-1].split('_') app_info.android_name = ''.join([i.capitalize() for i in android_name if i]) if options.app_version: app_info.app_version = options.app_version app_info.app_versionCode = MakeVersionCode(options) if options.app_root: app_info.app_root = os.path.expanduser(options.app_root) if options.enable_remote_debugging: app_info.remote_debugging = '--enable-remote-debugging' if options.fullscreen: app_info.fullscreen_flag = '-f' if options.orientation: app_info.orientation = options.orientation if options.icon: app_info.icon = '%s' % os.path.expanduser(options.icon) CustomizeAll(app_info, options.description, options.icon_dict, options.permissions, options.app_url, options.app_local_path, options.keep_screen_on, options.extensions, manifest, options.xwalk_command_line, options.compressor) def Execution(options, name): android_path = Which('android') if android_path is None: print('The "android" binary could not be found. Check your Android SDK ' 'installation and your PATH environment variable.') sys.exit(1) api_level = GetAndroidApiLevel() if api_level < 14: print('Please install Android API level (>=14) first.') sys.exit(3) target_string = 'android-%d' % api_level if options.keystore_path: key_store = os.path.expanduser(options.keystore_path) if options.keystore_alias: key_alias = options.keystore_alias else: print('Please provide an alias name of the developer key.') sys.exit(6) if options.keystore_passcode: key_code = options.keystore_passcode else: key_code = None if options.keystore_alias_passcode: key_alias_code = options.keystore_alias_passcode else: key_alias_code = None else: print ('Use xwalk\'s keystore by default for debugging. ' 'Please switch to your keystore when distributing it to app market.') key_store = 'xwalk-debug.keystore' key_alias = 'xwalkdebugkey' key_code = 'xwalkdebug' key_alias_code = 'xwalkdebug' # Check whether ant is installed. try: cmd = ['ant', '-version'] RunCommand(cmd, shell=True) except EnvironmentError: print('Please install ant first.') sys.exit(4) # Update android project for app and xwalk_core_library. 
update_project_cmd = ['android', 'update', 'project', '--path', name, '--target', target_string, '--name', name] if options.mode == 'embedded': RunCommand(['android', 'update', 'lib-project', '--path', os.path.join(name, 'xwalk_core_library'), '--target', target_string]) update_project_cmd.extend(['-l', 'xwalk_core_library']) else: # Shared mode doesn't need xwalk_runtime_java.jar. os.remove(os.path.join(name, 'libs', 'xwalk_runtime_java.jar')) RunCommand(update_project_cmd) # Check whether external extensions are included. extensions_string = 'xwalk-extensions' extensions_dir = os.path.join(os.getcwd(), name, extensions_string) external_extension_jars = FindExtensionJars(extensions_dir) for external_extension_jar in external_extension_jars: shutil.copyfile(external_extension_jar, os.path.join(name, 'libs', os.path.basename(external_extension_jar))) if options.mode == 'embedded': # Remove existing native libraries in xwalk_core_library, they are probably # for the last execution to make apk for another CPU arch. # And then copy the native libraries for the specified arch into # xwalk_core_library. arch = ConvertArchNameToArchFolder(options.arch) if not arch: print ('Invalid CPU arch: %s.' % arch) sys.exit(10) library_lib_path = os.path.join(name, 'xwalk_core_library', 'libs') for dir_name in os.listdir(library_lib_path): lib_dir = os.path.join(library_lib_path, dir_name) if ContainsNativeLibrary(lib_dir): shutil.rmtree(lib_dir) native_lib_path = os.path.join(name, 'native_libs', arch) if ContainsNativeLibrary(native_lib_path): shutil.copytree(native_lib_path, os.path.join(library_lib_path, arch)) else: print('No %s native library has been found for creating a Crosswalk ' 'embedded APK.' % arch) sys.exit(10) ant_cmd = ['ant', 'release', '-f', os.path.join(name, 'build.xml')] if not options.verbose: ant_cmd.extend(['-quiet']) ant_cmd.extend(['-Dkey.store="%s"' % os.path.abspath(key_store)]) ant_cmd.extend(['-Dkey.alias="%s"' % key_alias]) if key_code: ant_cmd.extend(['-Dkey.store.password="%s"' % key_code]) if key_alias_code: ant_cmd.extend(['-Dkey.alias.password="%s"' % key_alias_code]) ant_result = subprocess.call(ant_cmd) if ant_result != 0: print('Command "%s" exited with non-zero exit code %d' % (' '.join(ant_cmd), ant_result)) sys.exit(ant_result) src_file = os.path.join(name, 'bin', '%s-release.apk' % name) package_name = name if options.app_version: package_name += ('_' + options.app_version) if options.mode == 'shared': dst_file = os.path.join(options.target_dir, '%s.apk' % package_name) elif options.mode == 'embedded': dst_file = os.path.join(options.target_dir, '%s_%s.apk' % (package_name, options.arch)) shutil.copyfile(src_file, dst_file) def PrintPackageInfo(options, name, packaged_archs): package_name_version = os.path.join(options.target_dir, name) if options.app_version: package_name_version += '_' + options.app_version if len(packaged_archs) == 0: print ('A non-platform specific APK for the web application "%s" was ' 'generated successfully at\n%s.apk. It requires a shared Crosswalk ' 'Runtime to be present.' % (name, package_name_version)) return for arch in packaged_archs: print ('An APK for the web application "%s" including the Crosswalk ' 'Runtime built for %s was generated successfully, which can be ' 'found at\n%s_%s.apk.' 
% (name, arch, package_name_version, arch)) all_archs = set(AllArchitectures()) if len(packaged_archs) != len(all_archs): missed_archs = all_archs - set(packaged_archs) print ('\n\nWARNING: ') print ('This APK will only work on %s based Android devices. Consider ' 'building for %s as well.' % (', '.join(packaged_archs), ', '.join(missed_archs))) else: print ('\n\n%d APKs were created for %s devices. ' % (len(all_archs), ', '.join(all_archs))) print ('Please install the one that matches the processor architecture ' 'of your device.\n\n') print ('If you are going to submit this application to an application ' 'store, please make sure you submit both packages.\nInstructions ' 'for submitting multiple APKs to Google Play Store are available ' 'here:\nhttps://software.intel.com/en-us/html5/articles/submitting' '-multiple-crosswalk-apk-to-google-play-store') def MakeApk(options, app_info, manifest): Customize(options, app_info, manifest) name = app_info.android_name packaged_archs = [] if options.mode == 'shared': Execution(options, name) elif options.mode == 'embedded': # Copy xwalk_core_library into app folder and move the native libraries # out. # When making apk for specified CPU arch, will only include the # corresponding native library by copying it back into xwalk_core_library. target_library_path = os.path.join(name, 'xwalk_core_library') shutil.copytree('xwalk_core_library', target_library_path) library_lib_path = os.path.join(target_library_path, 'libs') native_lib_path = os.path.join(name, 'native_libs') os.makedirs(native_lib_path) available_archs = [] for dir_name in os.listdir(library_lib_path): lib_dir = os.path.join(library_lib_path, dir_name) if ContainsNativeLibrary(lib_dir): shutil.move(lib_dir, os.path.join(native_lib_path, dir_name)) available_archs.append(dir_name) if options.arch: Execution(options, name) packaged_archs.append(options.arch) else: # If the arch option is unspecified, all of available platform APKs # will be generated. valid_archs = ['x86', 'armeabi-v7a'] for arch in valid_archs: if arch in available_archs: if arch.find('x86') != -1: options.arch = 'x86' elif arch.find('arm') != -1: options.arch = 'arm' Execution(options, name) packaged_archs.append(options.arch) else: print('Warning: failed to create package for arch "%s" ' 'due to missing native library' % arch) if len(packaged_archs) == 0: print('No packages created, aborting') sys.exit(13) PrintPackageInfo(options, name, packaged_archs) def main(argv): parser = optparse.OptionParser() parser.add_option('-v', '--version', action='store_true', dest='version', default=False, help='The version of this python tool.') parser.add_option('--verbose', action="store_true", dest='verbose', default=False, help='Print debug messages.') info = ('The packaging mode of the web application. The value \'shared\' ' 'means that the runtime is shared across multiple application ' 'instances and that the runtime needs to be distributed separately. ' 'The value \'embedded\' means that the runtime is embedded into the ' 'application itself and distributed along with it.' 'Set the default mode as \'embedded\'. For example: --mode=embedded') parser.add_option('--mode', choices=('embedded', 'shared'), default='embedded', help=info) info = ('The target architecture of the embedded runtime. Supported values ' 'are \'x86\' and \'arm\'. 
Note, if undefined, APKs for all possible ' 'architestures will be generated.') parser.add_option('--arch', choices=AllArchitectures(), help=info) group = optparse.OptionGroup(parser, 'Application Source Options', 'This packaging tool supports 3 kinds of web application source: ' '1) XPK package; 2) manifest.json; 3) various command line options, ' 'for example, \'--app-url\' for website, \'--app-root\' and ' '\'--app-local-path\' for local web application.') info = ('The path of the XPK package. For example, --xpk=/path/to/xpk/file') group.add_option('--xpk', help=info) info = ('The manifest file with the detail description of the application. ' 'For example, --manifest=/path/to/your/manifest/file') group.add_option('--manifest', help=info) info = ('The url of application. ' 'This flag allows to package website as apk. For example, ' '--app-url=http://www.intel.com') group.add_option('--app-url', help=info) info = ('The root path of the web app. ' 'This flag allows to package local web app as apk. For example, ' '--app-root=/root/path/of/the/web/app') group.add_option('--app-root', help=info) info = ('The relative path of entry file based on the value from ' '\'app_root\'. This flag should work with \'--app-root\' together. ' 'For example, --app-local-path=/relative/path/of/entry/file') group.add_option('--app-local-path', help=info) parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Mandatory arguments', 'They are used for describing the APK information through ' 'command line options.') info = ('The apk name. For example, --name="Your Application Name"') group.add_option('--name', help=info) info = ('The package name. For example, ' '--package=com.example.YourPackage') group.add_option('--package', help=info) parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Optional arguments', 'They are used for various settings for applications through ' 'command line options.') info = ('The version name of the application. ' 'For example, --app-version=1.0.0') group.add_option('--app-version', help=info) info = ('The version code of the application. ' 'For example, --app-versionCode=24') group.add_option('--app-versionCode', type='int', help=info) info = ('The version code base of the application. Version code will ' 'be made by adding a prefix based on architecture to the version ' 'code base. For example, --app-versionCodeBase=24') group.add_option('--app-versionCodeBase', type='int', help=info) info = ('Use command lines.' 'Crosswalk is powered by Chromium and supports Chromium command line.' 'For example, ' '--xwalk-command-line=\'--chromium-command-1 --xwalk-command-2\'') group.add_option('--xwalk-command-line', default='', help=info) info = ('The description of the application. For example, ' '--description=YourApplicationDescription') group.add_option('--description', help=info) group.add_option('--enable-remote-debugging', action='store_true', dest='enable_remote_debugging', default=False, help='Enable remote debugging.') info = ('The list of external extension paths splitted by OS separators. ' 'The separators are \':\' , \';\' and \':\' on Linux, Windows and ' 'Mac OS respectively. 
For example, ' '--extensions=/path/to/extension1:/path/to/extension2.') group.add_option('--extensions', help=info) group.add_option('-f', '--fullscreen', action='store_true', dest='fullscreen', default=False, help='Make application fullscreen.') group.add_option('--keep-screen-on', action='store_true', default=False, help='Support keeping screen on') info = ('The path of application icon. ' 'Such as: --icon=/path/to/your/customized/icon') group.add_option('--icon', help=info) info = ('The orientation of the web app\'s display on the device. ' 'For example, --orientation=landscape. The default value is ' '\'unspecified\'. The permitted values are from Android: ' 'http://developer.android.com/guide/topics/manifest/' 'activity-element.html#screen') group.add_option('--orientation', help=info) info = ('The list of permissions to be used by web application. For example, ' '--permissions=geolocation:webgl') group.add_option('--permissions', help=info) info = ('Packaging tool will move the output APKS to the target directory') group.add_option('--target-dir', default=os.getcwd(), help=info) parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Keystore Options', 'The keystore is a signature from web developer, it\'s used when ' 'developer wants to distribute the applications.') info = ('The path to the developer keystore. For example, ' '--keystore-path=/path/to/your/developer/keystore') group.add_option('--keystore-path', help=info) info = ('The alias name of keystore. For example, --keystore-alias=name') group.add_option('--keystore-alias', help=info) info = ('The passcode of keystore. For example, --keystore-passcode=code') group.add_option('--keystore-passcode', help=info) info = ('Passcode for alias\'s private key in the keystore, ' 'For example, --keystore-alias-passcode=alias-code') group.add_option('--keystore-alias-passcode', help=info) info = ('Minify and obfuscate javascript and css.' '--compressor: compress javascript and css.' '--compressor=js: compress javascript.' '--compressor=css: compress css.') group.add_option('--compressor', dest='compressor', action='callback', callback=ParseParameterForCompressor, type='string', nargs=0, help=info) parser.add_option_group(group) options, _ = parser.parse_args() if len(argv) == 1: parser.print_help() return 0 if options.version: if os.path.isfile('VERSION'): print(GetVersion('VERSION')) return 0 else: parser.error('VERSION was not found, so Crosswalk\'s version could not ' 'be determined.') xpk_temp_dir = '' if options.xpk: xpk_name = os.path.splitext(os.path.basename(options.xpk))[0] xpk_temp_dir = xpk_name + '_xpk' ParseXPK(options, xpk_temp_dir) if options.app_root and not options.manifest: manifest_path = os.path.join(options.app_root, 'manifest.json') if os.path.exists(manifest_path): print('Using manifest.json distributed with the application.') options.manifest = manifest_path app_info = AppInfo() manifest = None if not options.manifest: # The checks here are really convoluted, but at the moment make_apk # misbehaves any of the following conditions is true. if options.app_url: # 1) --app-url must be passed without either --app-local-path or # --app-root. if options.app_root or options.app_local_path: parser.error('You must pass either "--app-url" or "--app-local-path" ' 'with "--app-root", but not all.') else: # 2) --app-url is not passed but only one of --app-local-path and # --app-root is set. 
if bool(options.app_root) != bool(options.app_local_path): parser.error('You must specify both "--app-local-path" and ' '"--app-root".') # 3) None of --app-url, --app-local-path and --app-root are passed. elif not options.app_root and not options.app_local_path: parser.error('You must pass either "--app-url" or "--app-local-path" ' 'with "--app-root".') if options.permissions: permission_list = options.permissions.split(':') else: print('Warning: all supported permissions on Android port are added. ' 'Refer to https://github.com/crosswalk-project/' 'crosswalk-website/wiki/Crosswalk-manifest') permission_list = permission_mapping_table.keys() options.permissions = HandlePermissionList(permission_list) options.icon_dict = {} else: try: manifest = ParseManifest(options) except SystemExit as ec: return ec.code if not options.name: parser.error('An APK name is required. Please use the "--name" option.') if not options.package: parser.error('A package name is required. Please use the "--package" ' 'option.') VerifyPackageName(options.package) if (options.app_root and options.app_local_path and not os.path.isfile(os.path.join(options.app_root, options.app_local_path))): print('Please make sure that the local path file of launching app ' 'does exist.') sys.exit(7) if options.target_dir: target_dir = os.path.abspath(os.path.expanduser(options.target_dir)) options.target_dir = target_dir if not os.path.isdir(target_dir): os.makedirs(target_dir) try: MakeApk(options, app_info, manifest) except SystemExit as ec: CleanDir(app_info.android_name) CleanDir('out') CleanDir(xpk_temp_dir) return ec.code return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
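# ---------------------------------------------------------------------------
# Illustrative invocations of the packaging tool above (not part of the
# original script; the file name "make_apk.py" and all paths, names and
# package ids here are assumed for the example). They exercise options
# defined in main():
#
#   python make_apk.py --manifest=/path/to/manifest.json --mode=embedded
#   python make_apk.py --app-url=http://www.intel.com \
#       --name="Example App" --package=com.example.exampleapp --arch=x86
#
# When --arch is omitted in embedded mode, MakeApk() builds one APK per
# available native-library architecture and PrintPackageInfo() reports them.
# ---------------------------------------------------------------------------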
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ROUGE metric implementation. Copy from tf_seq2seq/seq2seq/metrics/rouge.py. This is a modified and slightly extended version of https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py. """ import itertools import numpy as np def _get_ngrams(n, text): """Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams """ ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set def _split_into_words(sentences): """Splits multiple sentences into words and flattens the result.""" return list(itertools.chain(*[_.split(" ") for _ in sentences])) def _get_word_ngrams(n, sentences): """Calculates word n-grams for multiple sentences.""" assert len(sentences) > 0 assert n > 0 words = _split_into_words(sentences) return _get_ngrams(n, words) def _len_lcs(x, y): """ Returns the length of the Longest Common Subsequence between sequences x and y. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y """ table = _lcs(x, y) n, m = len(x), len(y) return table[n, m] def _lcs(x, y): """ Computes the length of the longest common subsequence (lcs) between two strings. The implementation below uses a dynamic programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: dict mapping index pairs (i, j) to the LCS length of x[:i] and y[:j] """ n, m = len(x), len(y) table = dict() for i in range(n + 1): for j in range(m + 1): if i == 0 or j == 0: table[i, j] = 0 elif x[i - 1] == y[j - 1]: table[i, j] = table[i - 1, j - 1] + 1 else: table[i, j] = max(table[i - 1, j], table[i, j - 1]) return table def _recon_lcs(x, y): """ Returns the Longest Common Subsequence between x and y. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns: sequence: LCS of x and y """ i, j = len(x), len(y) table = _lcs(x, y) def _recon(i, j): """private recon calculation""" if i == 0 or j == 0: return [] elif x[i - 1] == y[j - 1]: return _recon(i - 1, j - 1) + [(x[i - 1], i)] elif table[i - 1, j] > table[i, j - 1]: return _recon(i - 1, j) else: return _recon(i, j - 1) recon_tuple = tuple([x[0] for x in _recon(i, j)]) return recon_tuple def rouge_n(evaluated_sentences, reference_sentences, n=2): """ Computes ROUGE-N of two text collections of sentences. Source: http://research.microsoft.com/en-us/um/people/cyl/download/ papers/rouge-working-note-v1.3.1.pdf Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentences: The sentences from the reference set n: Size of ngram. Defaults to 2. 
Returns: A tuple (f1, precision, recall) for ROUGE-N Raises: ValueError: raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0: raise ValueError("Collections must contain at least 1 sentence.") evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences) reference_ngrams = _get_word_ngrams(n, reference_sentences) reference_count = len(reference_ngrams) evaluated_count = len(evaluated_ngrams) # Gets the overlapping ngrams between evaluated and reference overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams) overlapping_count = len(overlapping_ngrams) # Handle edge case. This isn't mathematically correct, but it's good enough if evaluated_count == 0: precision = 0.0 else: precision = overlapping_count / evaluated_count if reference_count == 0: recall = 0.0 else: recall = overlapping_count / reference_count f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8)) # return overlapping_count / reference_count return f1_score, precision, recall def _f_p_r_lcs(llcs, m, n): """ Computes the LCS-based F-measure score Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/ rouge-working-note-v1.3.1.pdf Args: llcs: Length of LCS m: number of words in reference summary n: number of words in candidate summary Returns: Float. LCS-based F-measure score """ r_lcs = llcs / m p_lcs = llcs / n beta = p_lcs / (r_lcs + 1e-12) num = (1 + (beta**2)) * r_lcs * p_lcs denom = r_lcs + ((beta**2) * p_lcs) f_lcs = num / (denom + 1e-12) return f_lcs, p_lcs, r_lcs def rouge_l_sentence_level(evaluated_sentences, reference_sentences): """ Computes ROUGE-L (sentence level) of two text collections of sentences. http://research.microsoft.com/en-us/um/people/cyl/download/papers/ rouge-working-note-v1.3.1.pdf Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentences: The sentences from the referene set Returns: A float: F_lcs Raises: ValueError: raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0: raise ValueError("Collections must contain at least 1 sentence.") reference_words = _split_into_words(reference_sentences) evaluated_words = _split_into_words(evaluated_sentences) m = len(reference_words) n = len(evaluated_words) lcs = _len_lcs(evaluated_words, reference_words) return _f_p_r_lcs(lcs, m, n) def _union_lcs(evaluated_sentences, reference_sentence): """ Returns LCS_u(r_i, C) which is the LCS score of the union longest common subsequence between reference sentence ri and candidate summary C. For example if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5". The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5" and LCS_u(r_i, C) = 4/5. 
Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentence: One of the sentences in the reference summaries Returns: float: LCS_u(r_i, C) ValueError: Raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0: raise ValueError("Collections must contain at least 1 sentence.") lcs_union = set() reference_words = _split_into_words([reference_sentence]) combined_lcs_length = 0 for eval_s in evaluated_sentences: evaluated_words = _split_into_words([eval_s]) lcs = set(_recon_lcs(reference_words, evaluated_words)) combined_lcs_length += len(lcs) lcs_union = lcs_union.union(lcs) union_lcs_count = len(lcs_union) union_lcs_value = union_lcs_count / combined_lcs_length return union_lcs_value def rouge_l_summary_level(evaluated_sentences, reference_sentences): """ Computes ROUGE-L (summary level) of two text collections of sentences. http://research.microsoft.com/en-us/um/people/cyl/download/papers/ rouge-working-note-v1.3.1.pdf Calculated according to: R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: SUM(i,u) = SUM from i through u u = number of sentences in reference summary C = Candidate summary made up of v sentences m = number of words in reference summary n = number of words in candidate summary Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentence: One of the sentences in the reference summaries Returns: A float: F_lcs Raises: ValueError: raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0: raise ValueError("Collections must contain at least 1 sentence.") # total number of words in reference sentences m = len(_split_into_words(reference_sentences)) # total number of words in evaluated sentences n = len(_split_into_words(evaluated_sentences)) union_lcs_sum_across_all_references = 0 for ref_s in reference_sentences: union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences, ref_s) return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n) def rouge(hypotheses, references): """Calculates average rouge scores for a list of hypotheses and references""" # Filter out hyps that are of 0 length # hyps_and_refs = zip(hypotheses, references) # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0] # hypotheses, references = zip(*hyps_and_refs) # Calculate ROUGE-1 F1, precision, recall scores rouge_1 = [ rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references) ] rouge_1_f, rouge_1_p, rouge_1_r = list(map(np.mean, list(zip(*rouge_1)))) # Calculate ROUGE-2 F1, precision, recall scores rouge_2 = [ rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references) ] rouge_2_f, rouge_2_p, rouge_2_r = list(map(np.mean, list(zip(*rouge_2)))) # Calculate ROUGE-L F1, precision, recall scores rouge_l = [ rouge_l_sentence_level([hyp], [ref]) for hyp, ref in zip(hypotheses, references) ] rouge_l_f, rouge_l_p, rouge_l_r = list(map(np.mean, list(zip(*rouge_l)))) return { "rouge_1/f_score": rouge_1_f, "rouge_1/r_score": rouge_1_r, "rouge_1/p_score": rouge_1_p, "rouge_2/f_score": rouge_2_f, "rouge_2/r_score": rouge_2_r, "rouge_2/p_score": rouge_2_p, "rouge_l/f_score": rouge_l_f, "rouge_l/r_score": rouge_l_r, "rouge_l/p_score": rouge_l_p, }
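# Minimal usage sketch (not in the original module; the toy strings are chosen
# purely for illustration). rouge_n() and rouge() are defined above. With
# unigram sets {the, cat, sat, on, mat} vs. {the, cat, is, on, mat} the overlap
# is 4 of 5 n-grams, so ROUGE-1 precision and recall are both 0.8.
if __name__ == "__main__":
    hypothesis = "the cat sat on the mat"
    reference = "the cat is on the mat"

    f1, precision, recall = rouge_n([hypothesis], [reference], n=1)
    print("ROUGE-1:", f1, precision, recall)  # ~0.8, 0.8, 0.8

    # Aggregate ROUGE-1/2/L scores over (here, a single) hypothesis/reference pair.
    scores = rouge([hypothesis], [reference])
    print(scores["rouge_1/f_score"], scores["rouge_l/f_score"])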
################################################################## # Code for testing the variational Multi-Stage Generative Model. # ################################################################## # basic python import numpy as np import numpy.random as npr import cPickle # theano business import theano import theano.tensor as T # phil's sweetness import utils from NetLayers import relu_actfun, softplus_actfun, tanh_actfun from HydraNet import HydraNet from GPSImputer import GPSImputer, load_gpsimputer_from_file from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \ row_shuffle, to_fX RESULT_PATH = "IMP_MNIST_GPSI/" ############################### ############################### ## TEST GPS IMPUTER ON MNIST ## ############################### ############################### def test_mnist(step_type='add', imp_steps=6, occ_dim=15, drop_prob=0.0): ######################################### # Format the result tag more thoroughly # ######################################### dp_int = int(100.0 * drop_prob) result_tag = "{}GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type) ########################## # Get some training data # ########################## rng = np.random.RandomState(1234) dataset = 'data/mnist.pkl.gz' datasets = load_udm(dataset, as_shared=False, zero_mean=False) Xtr = datasets[0][0] Xva = datasets[1][0] Xte = datasets[2][0] # Merge validation set and training set, and test on test set. Xtr = np.concatenate((Xtr, Xva), axis=0) Xva = Xte Xtr = to_fX(shift_and_scale_into_01(Xtr)) Xva = to_fX(shift_and_scale_into_01(Xva)) tr_samples = Xtr.shape[0] va_samples = Xva.shape[0] batch_size = 200 batch_reps = 1 all_pix_mean = np.mean(np.mean(Xtr, axis=1)) data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) ) ############################################################ # Setup some parameters for the Iterative Refinement Model # ############################################################ x_dim = Xtr.shape[1] z_dim = 100 init_scale = 1.0 use_bn = False x_in_sym = T.matrix('x_in_sym') x_out_sym = T.matrix('x_out_sym') x_mask_sym = T.matrix('x_mask_sym') ################# # p_zi_given_xi # ################# params = {} shared_config = \ [ {'layer_type': 'fc', 'in_chans': x_dim, 'out_chans': 800, 'activation': relu_actfun, 'apply_bn': use_bn}, \ {'layer_type': 'fc', 'in_chans': 800, 'out_chans': 800, 'activation': relu_actfun, 'apply_bn': use_bn} ] out_layer = { 'layer_type': 'fc', 'in_chans': 800, 'out_chans': z_dim, 'activation': relu_actfun, 'apply_bn': False } output_config = [out_layer, out_layer] params['shared_config'] = shared_config params['output_config'] = output_config params['init_scale'] = 1.0 params['build_theano_funcs'] = False p_zi_given_xi = HydraNet(rng=rng, Xd=x_in_sym, \ params=params, shared_param_dicts=None) p_zi_given_xi.init_biases(0.0) ################### # p_sip1_given_zi # ################### params = {} shared_config = \ [ {'layer_type': 'fc', 'in_chans': z_dim, 'out_chans': 800, 'activation': relu_actfun, 'apply_bn': use_bn}, \ {'layer_type': 'fc', 'in_chans': 800, 'out_chans': 800, 'activation': relu_actfun, 'apply_bn': use_bn} ] out_layer = { 'layer_type': 'fc', 'in_chans': 800, 'out_chans': x_dim, 'activation': relu_actfun, 'apply_bn': False } output_config = [out_layer, out_layer, out_layer] params['shared_config'] = shared_config params['output_config'] = output_config params['init_scale'] = 1.0 
params['build_theano_funcs'] = False p_sip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \ params=params, shared_param_dicts=None) p_sip1_given_zi.init_biases(0.0) ################# # q_zi_given_xi # ################# params = {} shared_config = \ [ {'layer_type': 'fc', 'in_chans': (x_dim+x_dim), 'out_chans': 800, 'activation': relu_actfun, 'apply_bn': use_bn}, \ {'layer_type': 'fc', 'in_chans': 800, 'out_chans': 800, 'activation': relu_actfun, 'apply_bn': use_bn} ] out_layer = { 'layer_type': 'fc', 'in_chans': 800, 'out_chans': z_dim, 'activation': relu_actfun, 'apply_bn': False } output_config = [out_layer, out_layer] params['shared_config'] = shared_config params['output_config'] = output_config params['init_scale'] = 1.0 params['build_theano_funcs'] = False q_zi_given_xi = HydraNet(rng=rng, Xd=x_in_sym, \ params=params, shared_param_dicts=None) q_zi_given_xi.init_biases(0.0) ########################################################### # Define parameters for the GPSImputer, and initialize it # ########################################################### print("Building the GPSImputer...") gpsi_params = {} gpsi_params['x_dim'] = x_dim gpsi_params['z_dim'] = z_dim # switch between direct construction and construction via p_x_given_si gpsi_params['imp_steps'] = imp_steps gpsi_params['step_type'] = step_type gpsi_params['x_type'] = 'bernoulli' gpsi_params['obs_transform'] = 'sigmoid' GPSI = GPSImputer(rng=rng, x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, p_zi_given_xi=p_zi_given_xi, p_sip1_given_zi=p_sip1_given_zi, q_zi_given_xi=q_zi_given_xi, params=gpsi_params, shared_param_dicts=None) ################################################################ # Apply some updates, to check that they aren't totally broken # ################################################################ log_name = "{}_RESULTS.txt".format(result_tag) out_file = open(log_name, 'wb') costs = [0. 
for i in range(10)] learn_rate = 0.0001 momentum = 0.90 batch_idx = np.arange(batch_size) + tr_samples for i in range(200000): scale = min(1.0, ((i+1) / 5000.0)) if (((i + 1) % 15000) == 0): learn_rate = learn_rate * 0.95 # get the indices of training samples for this batch update batch_idx += batch_size if (np.max(batch_idx) >= tr_samples): # we finished an "epoch", so we rejumble the training set Xtr = row_shuffle(Xtr) batch_idx = np.arange(batch_size) # set sgd and objective function hyperparams for this update GPSI.set_sgd_params(lr=scale*learn_rate, \ mom_1=scale*momentum, mom_2=0.98) GPSI.set_train_switch(1.0) GPSI.set_lam_nll(lam_nll=1.0) GPSI.set_lam_kld(lam_kld_q=1.0, lam_kld_p=0.1, lam_kld_g=0.0) GPSI.set_lam_l2w(1e-5) # perform a minibatch update and record the cost for this batch xb = to_fX( Xtr.take(batch_idx, axis=0) ) xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) result = GPSI.train_joint(xi, xo, xm, batch_reps) # do diagnostics and general training tracking costs = [(costs[j] + result[j]) for j in range(len(result)-1)] if ((i % 500) == 0): costs = [(v / 500.0) for v in costs] str1 = "-- batch {0:d} --".format(i) str2 = " joint_cost: {0:.4f}".format(costs[0]) str3 = " nll_bound : {0:.4f}".format(costs[1]) str4 = " nll_cost : {0:.4f}".format(costs[2]) str5 = " kld_cost : {0:.4f}".format(costs[3]) str6 = " reg_cost : {0:.4f}".format(costs[4]) joint_str = "\n".join([str1, str2, str3, str4, str5, str6]) print(joint_str) out_file.write(joint_str+"\n") out_file.flush() costs = [0.0 for v in costs] if ((i % 1000) == 0): Xva = row_shuffle(Xva) # record an estimate of performance on the test set xi, xo, xm = construct_masked_data(Xva[0:5000], drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) nll, kld = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10) vfe = np.mean(nll) + np.mean(kld) str1 = " va_nll_bound : {}".format(vfe) str2 = " va_nll_term : {}".format(np.mean(nll)) str3 = " va_kld_q2p : {}".format(np.mean(kld)) joint_str = "\n".join([str1, str2, str3]) print(joint_str) out_file.write(joint_str+"\n") out_file.flush() if ((i % 2000) == 0): GPSI.save_to_file("{}_PARAMS.pkl".format(result_tag)) # Get some validation samples for evaluating model performance xb = to_fX( Xva[0:100] ) xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) xi = np.repeat(xi, 2, axis=0) xo = np.repeat(xo, 2, axis=0) xm = np.repeat(xm, 2, axis=0) # draw some sample imputations from the model samp_count = xi.shape[0] _, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False) seq_len = len(model_samps) seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1])) idx = 0 for s1 in range(samp_count): for s2 in range(seq_len): seq_samps[idx] = model_samps[s2][s1] idx += 1 file_name = "{0:s}_samples_ng_b{1:d}.png".format(result_tag, i) utils.visualize_samples(seq_samps, file_name, num_rows=20) ################################# ################################# ## CHECK MNIST IMPUTER RESULTS ## ################################# ################################# def test_mnist_results(step_type='add', imp_steps=6, occ_dim=15, drop_prob=0.0): ######################################### # Format the result tag more thoroughly # ######################################### dp_int = int(100.0 * drop_prob) result_tag = "{}GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type) ########################## # Get some training data # ########################## rng = 
np.random.RandomState(1234) dataset = 'data/mnist.pkl.gz' datasets = load_udm(dataset, as_shared=False, zero_mean=False) Xtr = datasets[0][0] Xva = datasets[1][0] Xte = datasets[2][0] # Merge validation set and training set, and test on test set. Xtr = np.concatenate((Xtr, Xva), axis=0) Xva = Xte Xtr = to_fX(shift_and_scale_into_01(Xtr)) Xva = to_fX(shift_and_scale_into_01(Xva)) tr_samples = Xtr.shape[0] va_samples = Xva.shape[0] batch_size = 250 batch_reps = 1 all_pix_mean = np.mean(np.mean(Xtr, axis=1)) data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) ) # Load parameters from a previously trained model print("Testing model load from file...") GPSI = load_gpsimputer_from_file(f_name="{}_PARAMS.pkl".format(result_tag), \ rng=rng) ################################################################ # Apply some updates, to check that they aren't totally broken # ################################################################ log_name = "{}_FINAL_RESULTS_NEW.txt".format(result_tag) out_file = open(log_name, 'wb') Xva = row_shuffle(Xva) # record an estimate of performance on the test set str0 = "GUIDED SAMPLE BOUND:" print(str0) xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \ use_guide_policy=True) xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \ use_guide_policy=True) nll = np.concatenate((nll_0, nll_1)) kld = np.concatenate((kld_0, kld_1)) vfe = np.mean(nll) + np.mean(kld) str1 = " va_nll_bound : {}".format(vfe) str2 = " va_nll_term : {}".format(np.mean(nll)) str3 = " va_kld_q2p : {}".format(np.mean(kld)) joint_str = "\n".join([str0, str1, str2, str3]) print(joint_str) out_file.write(joint_str+"\n") out_file.flush() # record an estimate of performance on the test set str0 = "UNGUIDED SAMPLE BOUND:" print(str0) xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \ use_guide_policy=False) xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \ use_guide_policy=False) nll = np.concatenate((nll_0, nll_1)) kld = np.concatenate((kld_0, kld_1)) str1 = " va_nll_bound : {}".format(np.mean(nll)) str2 = " va_nll_term : {}".format(np.mean(nll)) str3 = " va_kld_q2p : {}".format(np.mean(kld)) joint_str = "\n".join([str0, str1, str2, str3]) print(joint_str) out_file.write(joint_str+"\n") out_file.flush() if __name__=="__main__": ######### # MNIST # ######### # TRAINING # test_mnist(step_type='add', occ_dim=14, drop_prob=0.0) # test_mnist(step_type='add', occ_dim=16, drop_prob=0.0) # test_mnist(step_type='add', occ_dim=0, drop_prob=0.6) # test_mnist(step_type='add', occ_dim=0, drop_prob=0.8) # test_mnist(step_type='jump', occ_dim=14, drop_prob=0.0) # test_mnist(step_type='jump', occ_dim=16, drop_prob=0.0) # test_mnist(step_type='jump', occ_dim=0, drop_prob=0.6) # test_mnist(step_type='jump', occ_dim=0, drop_prob=0.8) # test_mnist(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9) # 
test_mnist(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='jump', imp_steps=5, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='jump', imp_steps=2, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='jump', imp_steps=1, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='jump', imp_steps=10, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='jump', imp_steps=15, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='lstm', imp_steps=5, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='lstm', imp_steps=2, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='lstm', imp_steps=1, occ_dim=0, drop_prob=0.9) test_mnist(step_type='lstm', imp_steps=10, occ_dim=0, drop_prob=0.9) # test_mnist(step_type='lstm', imp_steps=15, occ_dim=0, drop_prob=0.9) # RESULTS # test_mnist_results(step_type='add', occ_dim=14, drop_prob=0.0) # test_mnist_results(step_type='add', occ_dim=16, drop_prob=0.0) # test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.6) # test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.7) # test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.8) # test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='jump', occ_dim=14, drop_prob=0.0) # test_mnist_results(step_type='jump', occ_dim=16, drop_prob=0.0) # test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.6) # test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.7) # test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.8) # test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='jump', imp_steps=1, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='jump', imp_steps=2, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='jump', imp_steps=5, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='jump', imp_steps=10, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='jump', imp_steps=15, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='lstm', imp_steps=1, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='lstm', imp_steps=2, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='lstm', imp_steps=5, occ_dim=0, drop_prob=0.9) test_mnist_results(step_type='lstm', imp_steps=10, occ_dim=0, drop_prob=0.9) # test_mnist_results(step_type='lstm', imp_steps=15, occ_dim=0, drop_prob=0.9)
EnsureSConsVersion(1,2) import os import inspect import platform def get_cuda_paths(): """Determines CUDA {bin,lib,include} paths returns (bin_path,lib_path,inc_path) """ # determine defaults if os.name == 'nt': bin_path = 'C:/CUDA/bin' lib_path = 'C:/CUDA/lib' inc_path = 'C:/CUDA/include' elif os.name == 'posix': bin_path = '/usr/local/cuda/bin' lib_path = '/usr/local/cuda/lib' inc_path = '/usr/local/cuda/include' else: raise ValueError, 'Error: unknown OS. Where is nvcc installed?' if platform.platform()[:6] != 'Darwin' and \ platform.machine()[-2:] == '64': lib_path += '64' # override with environement variables if 'CUDA_BIN_PATH' in os.environ: bin_path = os.path.abspath(os.environ['CUDA_BIN_PATH']) if 'CUDA_LIB_PATH' in os.environ: lib_path = os.path.abspath(os.environ['CUDA_LIB_PATH']) if 'CUDA_INC_PATH' in os.environ: inc_path = os.path.abspath(os.environ['CUDA_INC_PATH']) return (bin_path,lib_path,inc_path) def getTools(): result = [] if os.name == 'nt': result = ['default', 'msvc'] elif os.name == 'posix': result = ['default', 'gcc'] else: result = ['default'] return result; OldEnvironment = Environment; # this dictionary maps the name of a compiler program to a dictionary mapping the name of # a compiler switch of interest to the specific switch implementing the feature gCompilerOptions = { 'gcc' : {'warn_all' : '-Wall', 'warn_errors' : '-Werror', 'optimization' : '-O2', 'debug' : '-g', 'exception_handling' : '', 'omp' : '-fopenmp'}, 'g++' : {'warn_all' : '-Wall', 'warn_errors' : '-Werror', 'optimization' : '-O2', 'debug' : '-g', 'exception_handling' : '', 'omp' : '-fopenmp'}, 'cl' : {'warn_all' : '/Wall', 'warn_errors' : '/WX', 'optimization' : '/Ox', 'debug' : ['/Zi', '-D_DEBUG', '/MTd'], 'exception_handling' : '/EHsc', 'omp' : '/openmp'} } # this dictionary maps the name of a linker program to a dictionary mapping the name of # a linker switch of interest to the specific switch implementing the feature gLinkerOptions = { 'gcc' : {'debug' : ''}, 'g++' : {'debug' : ''}, 'link' : {'debug' : '/debug' } } def getCFLAGS(mode, warn, warnings_as_errors, CC): result = [] if mode == 'release': # turn on optimization result.append(gCompilerOptions[CC]['optimization']) elif mode == 'debug': # turn on debug mode result.append(gCompilerOptions[CC]['debug']) result.append('-DTHRUST_DEBUG') if warn: # turn on all warnings result.append(gCompilerOptions[CC]['warn_all']) if warnings_as_errors: # treat warnings as errors result.append(gCompilerOptions[CC]['warn_errors']) # avoid problems specific to windows if CC == 'cl': # avoid min/max problems due to windows.h result.append('/DNOMINMAX') # suppress warnings due to "decorated name length exceeded" result.append('/wd4503') return result def getCXXFLAGS(mode, warn, warnings_as_errors, CXX): result = [] if mode == 'release': # turn on optimization result.append(gCompilerOptions[CXX]['optimization']) elif mode == 'debug': # turn on debug mode result.append(gCompilerOptions[CXX]['debug']) # enable exception handling result.append(gCompilerOptions[CXX]['exception_handling']) if warn: # turn on all warnings result.append(gCompilerOptions[CXX]['warn_all']) if warnings_as_errors: # treat warnings as errors result.append(gCompilerOptions[CXX]['warn_errors']) return result def getNVCCFLAGS(mode, arch): result = ['-arch=' + arch] if platform.platform()[:6] == 'Darwin': if platform.machine()[-2:] == '64': result.append('-m64') else: result.append('-m32') if mode == 'debug': # turn on debug mode # XXX make this work when we've debugged nvcc -G 
#result.append('-G') pass return result def getLINKFLAGS(mode, LINK): result = [] if mode == 'debug': # turn on debug mode result.append(gLinkerOptions[LINK]['debug']) return result def Environment(): # allow the user discretion to choose the MSVC version vars = Variables() if os.name == 'nt': vars.Add(EnumVariable('MSVC_VERSION', 'MS Visual C++ version', None, allowed_values=('8.0', '9.0', '10.0'))) # add a variable to handle RELEASE/DEBUG mode vars.Add(EnumVariable('mode', 'Release versus debug mode', 'release', allowed_values = ('release', 'debug'))) # add a variable to handle compute capability vars.Add(EnumVariable('arch', 'Compute capability code generation', 'sm_10', allowed_values = ('sm_10', 'sm_11', 'sm_12', 'sm_13', 'sm_20', 'sm_21', 'sm_30'))) # add a variable to handle warnings if os.name == 'posix': vars.Add(BoolVariable('Wall', 'Enable all compilation warnings', 1)) else: vars.Add(BoolVariable('Wall', 'Enable all compilation warnings', 0)) # add a variable to treat warnings as errors vars.Add(BoolVariable('Werror', 'Treat warnings as errors', 0)) # create an Environment env = OldEnvironment(tools = getTools(), variables = vars) # get the absolute path to the directory containing # this source file thisFile = inspect.getabsfile(Environment) thisDir = os.path.dirname(thisFile) # enable nvcc env.Tool('nvcc', toolpath = [os.path.join(thisDir)]) # get C compiler switches env.Append(CFLAGS = getCFLAGS(env['mode'], env['Wall'], env['Werror'], env.subst('$CC'))) # get CXX compiler switches env.Append(CXXFLAGS = getCXXFLAGS(env['mode'], env['Wall'], env['Werror'], env.subst('$CXX'))) # get NVCC compiler switches env.Append(NVCCFLAGS = getNVCCFLAGS(env['mode'], env['arch'])) # get linker switches env.Append(LINKFLAGS = getLINKFLAGS(env['mode'], env.subst('$LINK'))) # get CUDA paths (cuda_exe_path,cuda_lib_path,cuda_inc_path) = get_cuda_paths() env.Append(LIBPATH = [cuda_lib_path]) env.Append(CPPPATH = [cuda_inc_path]) # link against the standard library # we don't have to do this on Windows if os.name == 'posix': env.Append(LIBS = ['stdc++']) # link against backend-specific runtimes # XXX we shouldn't have to link against cudart unless we're using the # cuda runtime, but cudafe inserts some dependencies when compiling .cu files # XXX ideally this gets handled in nvcc.py if possible env.Append(LIBS = ['cudart']) # import the LD_LIBRARY_PATH so we can run commands which depend # on shared libraries # XXX we should probably just copy the entire environment if os.name == 'posix': if env['PLATFORM'] == "darwin": env['ENV']['DYLD_LIBRARY_PATH'] = os.environ['DYLD_LIBRARY_PATH'] else: env['ENV']['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH'] # generate help text Help(vars.GenerateHelpText(env)) return env
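# ---------------------------------------------------------------------------
# Usage sketch (assumed, not part of the original build script): a top-level
# SConstruct/SConscript could consume the Environment() wrapper above roughly
# like this. 'saxpy.cu' is a hypothetical source file; env.Program() is the
# standard SCons builder, and .cu compilation is handled by the 'nvcc' tool
# loaded inside Environment().
#
#   env = Environment()
#   env.Program('saxpy', ['saxpy.cu'])
#
# Invoking e.g. `scons mode=debug arch=sm_20 Wall=1` picks up the command-line
# variables registered above.
# ---------------------------------------------------------------------------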
"""Unit tests for idlelib.AutoExpand"""
import unittest
from test.support import requires
from tkinter import Text, Tk
#from idlelib.idle_test.mock_tk import Text
from idlelib.AutoExpand import AutoExpand


class Dummy_Editwin:
    # AutoExpand.__init__ only needs .text
    def __init__(self, text):
        self.text = text

class AutoExpandTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        if 'tkinter' in str(Text):
            requires('gui')
            cls.tk = Tk()
            cls.text = Text(cls.tk)
        else:
            cls.text = Text()
        cls.auto_expand = AutoExpand(Dummy_Editwin(cls.text))

    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, 'tk'):
            cls.tk.destroy()
            del cls.tk
        del cls.text, cls.auto_expand

    def tearDown(self):
        self.text.delete('1.0', 'end')

    def test_get_prevword(self):
        text = self.text
        previous = self.auto_expand.getprevword
        equal = self.assertEqual

        equal(previous(), '')

        text.insert('insert', 't')
        equal(previous(), 't')

        text.insert('insert', 'his')
        equal(previous(), 'this')

        text.insert('insert', ' ')
        equal(previous(), '')

        text.insert('insert', 'is')
        equal(previous(), 'is')

        text.insert('insert', '\nsample\nstring')
        equal(previous(), 'string')

        text.delete('3.0', 'insert')
        equal(previous(), '')

        text.delete('1.0', 'end')
        equal(previous(), '')

    def test_before_only(self):
        previous = self.auto_expand.getprevword
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual

        self.text.insert('insert', 'ab ac bx ad ab a')
        equal(self.auto_expand.getwords(), ['ab', 'ad', 'ac', 'a'])
        expand('event')
        equal(previous(), 'ab')
        expand('event')
        equal(previous(), 'ad')
        expand('event')
        equal(previous(), 'ac')
        expand('event')
        equal(previous(), 'a')

    def test_after_only(self):
        # Also add punctuation 'noise' that should be ignored.
        text = self.text
        previous = self.auto_expand.getprevword
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual

        text.insert('insert', 'a, [ab] ac: () bx"" cd ac= ad ya')
        text.mark_set('insert', '1.1')
        equal(self.auto_expand.getwords(), ['ab', 'ac', 'ad', 'a'])
        expand('event')
        equal(previous(), 'ab')
        expand('event')
        equal(previous(), 'ac')
        expand('event')
        equal(previous(), 'ad')
        expand('event')
        equal(previous(), 'a')

    def test_both_before_after(self):
        text = self.text
        previous = self.auto_expand.getprevword
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual

        text.insert('insert', 'ab xy yz\n')
        text.insert('insert', 'a ac by ac')

        text.mark_set('insert', '2.1')
        equal(self.auto_expand.getwords(), ['ab', 'ac', 'a'])
        expand('event')
        equal(previous(), 'ab')
        expand('event')
        equal(previous(), 'ac')
        expand('event')
        equal(previous(), 'a')

    def test_other_expand_cases(self):
        text = self.text
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual

        # no expansion candidate found
        equal(self.auto_expand.getwords(), [])
        equal(expand('event'), 'break')

        text.insert('insert', 'bx cy dz a')
        equal(self.auto_expand.getwords(), [])

        # reset state by successfully expanding once
        # move cursor to another position and expand again
        text.insert('insert', 'ac xy a ac ad a')
        text.mark_set('insert', '1.7')
        expand('event')
        initial_state = self.auto_expand.state
        text.mark_set('insert', '1.end')
        expand('event')
        new_state = self.auto_expand.state
        self.assertNotEqual(initial_state, new_state)

if __name__ == '__main__':
    unittest.main(verbosity=2)
"""This is the Bokeh charts interface. It gives you a high level API to build complex plot is a simple way. This is the Builder class, a minimal prototype class to build more chart types on top of it. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import absolute_import import warnings from six import string_types from .attributes import AttrSpec, ColorAttr, CatAttr from .chart import Chart from .data_source import ChartDataSource from .models import CompositeGlyph from .properties import Dimension, ColumnLabel from .utils import collect_attribute_columns, label_from_index_dict, build_hover_tooltips from .data_source import OrderedAssigner from ..models.ranges import Range, Range1d, FactorRange from ..models.sources import ColumnDataSource from ..core.properties import (HasProps, Instance, List, String, Dict, Color, Bool, Tuple, Either, Enum) from ..core.enums import SortDirection from ..io import curdoc, curstate #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- def create_and_build(builder_class, *data, **kws): """A factory function for handling Chart and Builder generation. Returns: :class:`Chart` """ if getattr(builder_class, 'dimensions') is None: raise NotImplementedError('Each builder must specify its dimensions, %s does not.' % builder_class.__name__) if getattr(builder_class, 'default_attributes') is None: raise NotImplementedError('Each builder must specify its default_attributes, %s does not.' % builder_class.__name__) builder_props = set(builder_class.properties()) | \ set(getattr(builder_class, "__deprecated_attributes__", [])) # append dimensions to the builder props for dim in builder_class.dimensions: builder_props.add(dim) # append attributes to the builder props for attr_name in builder_class.default_attributes.keys(): builder_props.add(attr_name) # create the new builder builder_kws = {k: v for k, v in kws.items() if k in builder_props} builder = builder_class(*data, **builder_kws) # create a chart to return, since there isn't one already chart_kws = {k: v for k, v in kws.items() if k not in builder_props} chart = Chart(**chart_kws) chart.add_builder(builder) chart.start_plot() return chart class Builder(HasProps): """ A prototype class to inherit each new chart Builder type. It provides useful methods to be used by the inherited builder classes, in order to automate most of the charts creation tasks and leave the core customization to specialized builder classes. In that pattern inherited builders just need to provide the following methods: Required: * :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be rendered into the plot. Here you should call the :meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can setup the legend for you. * :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the glyphs. 
This is called after glyph creation, so you are able to inspect the comp_glyphs for their minimum and maximum values. See the :meth:`~bokeh.charts.builder.Builder.create` method for more information on when this is called and how the builder provides the ranges to the containing :class:`Chart` using the :meth:`Chart.add_ranges` method. Optional: * :meth:`~bokeh.charts.builder.Builder.setup`: provides an area where subclasses of builder can introspect properties, setup attributes, or change property values. This is called before :meth:`~bokeh.charts.builder.Builder.process_data`. * :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area where subclasses of builder can manipulate the source data before renderers are created. """ # Optional Inputs x_range = Instance(Range) y_range = Instance(Range) xlabel = String() ylabel = String() xscale = String() yscale = String() palette = List(Color, help="""Optional input to override the default palette used by any color attribute. """) # Dimension Configuration """ The dimension labels that drive the position of the glyphs. Subclasses should implement this so that the Builder base class knows which dimensions it needs to operate on. An example for a builder working with cartesian x and y coordinates would be dimensions = ['x', 'y']. You should then instantiate the x and y dimensions as attributes of the subclass of builder using the :class:`Dimension <bokeh.charts.properties.Dimension>` class. One for x, as x = Dimension(...), and one as y = Dimension(...). """ dimensions = None # None because it MUST be overridden """ The dimension labels that must exist to produce the glyphs. This specifies what are the valid configurations for the chart, with the option of specifying the type of the columns. The :class:`~bokeh.charts.data_source.ChartDataSource` will inspect this property of your subclass of Builder and use this to fill in any required dimensions if no keyword arguments are used. """ req_dimensions = [] # Attribute Configuration attributes = Dict(String, Instance(AttrSpec), help=""" The attribute specs used to group data. This is a mapping between the role of the attribute spec (e.g. 'color') and the :class:`~bokeh.charts.attributes.AttrSpec` class (e.g., :class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this attributes property during runtime, which will consist of any attribute specs that are passed into the chart creation function (e.g., :class:`~bokeh.charts.Bar`), ones that are created for the user from simple input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`), or lastly, the attribute spec found in the default_attributes configured for the subclass of :class:`~bokeh.charts.builder.Builder`. """) """ The default attribute specs used to group data. This is where the subclass of Builder should specify what the default attributes are that will yield attribute values to each group of data, and any specific configuration. For example, the :class:`ColorAttr` utilizes a default palette for assigning color based on groups of data. If the user doesn't assign a column of the data to the associated attribute spec, then the default attrspec is used, which will yield a constant color value for each group of data. This is by default the first color in the default palette, but can be customized by setting the default color in the ColorAttr. 
""" default_attributes = None # None because it MUST be overridden # Derived properties (created by Builder at runtime) attribute_columns = List(ColumnLabel, help=""" All columns used for specifying attributes for the Chart. The Builder will set this value on creation so that the subclasses can know the distinct set of columns that are being used to assign attributes. """) comp_glyphs = List(Instance(CompositeGlyph), help=""" A list of composite glyphs, where each represents a unique subset of data. The composite glyph is a helper class that encapsulates all low level :class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of data. For example, the :class:`BoxGlyph` is a single class that yields each :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The single Box represents a full array of values that are aggregated, and is made up of multiple :class:`~bokeh.models.glyphs.Rect` and :class:`~bokeh.models.glyphs.Segment` glyphs. """) labels = List(String, help="""Represents the unique labels to be used for legends.""") """List of attributes to use for legends.""" label_attributes = [] """ Used to assign columns to dimensions when no selections have been provided. The default behavior is provided by the :class:`OrderedAssigner`, which assigns a single column to each dimension available in the `Builder`'s `dims` property. """ column_selector = OrderedAssigner comp_glyph_types = List(Instance(CompositeGlyph)) sort_dim = Dict(String, Bool, default={}) sort_legend = List(Tuple(String, Bool), help=""" List of tuples to use for sorting the legend, in order that they should be used for sorting. This sorting can be different than the sorting used for the rest of the chart. For example, you might want to sort only on the column assigned to the color attribute, or sort it descending. The order of each tuple is (Column, Ascending). """) legend_sort_field = String(help=""" Attribute that should be used to sort the legend, for example: color, dash, maker, etc. Valid values for this property depend on the type of chart. """) legend_sort_direction = Enum(SortDirection, help=""" Sort direction to apply to :attr:`~bokeh.charts.builder.Builder.sort_legend`. Valid values are: `ascending` or `descending`. """) source = Instance(ColumnDataSource) tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None, help=""" Tells the builder to add tooltips to the chart by either using the columns specified to the chart attributes (True), or by generating tooltips for each column specified (list(str)), or by explicit specification of the tooltips using the valid input for the `HoverTool` tooltips kwarg. """) __deprecated_attributes__ = ('sort_legend',) def __init__(self, *args, **kws): """Common arguments to be used by all the inherited classes. Args: data (:ref:`userguide_charts_data_types`): source data for the chart legend (str, bool): the legend of your plot. The legend content is inferred from incoming input.It can be ``top_left``, ``top_right``, ``bottom_left``, ``bottom_right``. It is ``top_right`` is you set it as True. Attributes: source (obj): datasource object for your plot, initialized as a dummy None. x_range (obj): x-associated datarange object for you plot, initialized as a dummy None. y_range (obj): y-associated datarange object for you plot, initialized as a dummy None. groups (list): to be filled with the incoming groups of data. Useful for legend construction. 
data (dict): to be filled with the incoming data and be passed to the ChartDataSource for each Builder class. attr (list(AttrSpec)): to be filled with the new attributes created after loading the data dict. """ data = None if len(args) != 0 or len(kws) != 0: # chart dimensions can be literal dimensions or attributes attrs = list(self.default_attributes.keys()) dims = self.dimensions + attrs # pop the dimension inputs from kwargs data_args = {} for dim in dims: if dim in kws.keys(): data_args[dim] = kws[dim] # build chart data source from inputs, given the dimension configuration data_args['dims'] = tuple(dims) data_args['required_dims'] = tuple(self.req_dimensions) data_args['attrs'] = attrs data_args['column_assigner'] = self.column_selector data = ChartDataSource.from_data(*args, **data_args) # make sure that the builder dimensions have access to the chart data source for dim in self.dimensions: getattr(getattr(self, dim), 'set_data')(data) # handle input attrs and ensure attrs have access to data attributes = self._setup_attrs(data, kws) # remove inputs handled by dimensions and chart attributes for dim in dims: kws.pop(dim, None) else: attributes = dict() kws['attributes'] = attributes super(Builder, self).__init__(**kws) # collect unique columns used for attributes self.attribute_columns = collect_attribute_columns(**self.attributes) for k in self.__deprecated_attributes__: if k in kws: setattr(self, k, kws[k]) self._data = data self._legends = [] def _setup_attrs(self, data, kws): """Handle overridden attributes and initialize them with data. Makes sure that all attributes have access to the data source, which is used for mapping attributes to groups of data. Returns: None """ source = ColumnDataSource(data.df) attr_names = self.default_attributes.keys() custom_palette = kws.get('palette') attributes = dict() for attr_name in attr_names: attr = kws.pop(attr_name, None) # if given an attribute use it if isinstance(attr, AttrSpec): attributes[attr_name] = attr # if we are given columns, use those elif isinstance(attr, str) or isinstance(attr, list): attributes[attr_name] = self.default_attributes[attr_name]._clone() # override palette if available if isinstance(attributes[attr_name], ColorAttr): if custom_palette is not None: attributes[attr_name].iterable = custom_palette attributes[attr_name].setup(data=source, columns=attr) else: # override palette if available if (isinstance(self.default_attributes[attr_name], ColorAttr) and custom_palette is not None): attributes[attr_name] = self.default_attributes[attr_name]._clone() attributes[attr_name].iterable = custom_palette else: attributes[attr_name] = self.default_attributes[attr_name]._clone() # make sure all have access to data source for attr_name in attr_names: attributes[attr_name].update_data(data=source) return attributes def setup(self): """Perform any initial pre-processing, attribute config. Returns: None """ pass def process_data(self): """Make any global data manipulations before grouping. It has to be implemented by any of the inherited class representing each different chart type. It is the place where we make specific calculations for each chart. Returns: None """ pass def yield_renderers(self): """ Generator that yields the glyphs to be draw on the plot It has to be implemented by any of the inherited class representing each different chart type. Yields: :class:`GlyphRenderer` """ raise NotImplementedError('Subclasses of %s must implement _yield_renderers.' 
% self.__class__.__name__) def set_ranges(self): """Calculate and set the x and y ranges. It has to be implemented by any of the subclasses of builder representing each different chart type, and is called after :meth:`yield_renderers`. Returns: None """ raise NotImplementedError('Subclasses of %s must implement _set_ranges.' % self.__class__.__name__) def get_dim_extents(self): """Helper method to retrieve maximum extents of all the renderers. Returns: a dict mapping between dimension and value for x_max, y_max, x_min, y_min """ return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]), 'y_max': max([renderer.y_max for renderer in self.comp_glyphs]), 'x_min': min([renderer.x_min for renderer in self.comp_glyphs]), 'y_min': min([renderer.y_min for renderer in self.comp_glyphs]) } def add_glyph(self, group, glyph): """Add a composite glyph. Manages the legend, since the builder might not want all attribute types used for the legend. Args: group (:class:`DataGroup`): the data the `glyph` is associated with glyph (:class:`CompositeGlyph`): the glyph associated with the `group` Returns: None """ if isinstance(glyph, list): for sub_glyph in glyph: self.comp_glyphs.append(sub_glyph) else: self.comp_glyphs.append(glyph) # handle cases where builders have specified which attributes to use for labels label = None if len(self.label_attributes) > 0: for attr in self.label_attributes: # this will get the last attribute group label for now if self.attributes[attr].columns is not None: label = self._get_group_label(group, attr=attr) # if no special case for labeling, just use the group label if label is None: label = self._get_group_label(group, attr='label') # add to legend if new and unique label if str(label) not in self.labels and label is not None: self._legends.append((label, glyph.renderers)) self.labels.append(label) def _get_group_label(self, group, attr='label'): """Get the label of the group by the attribute name. Args: group (:attr:`DataGroup`: the group of data attr (str, optional): the attribute name containing the label, defaults to 'label'. Returns: str: the label for the group """ if attr is 'label': label = group.label else: label = group[attr] if isinstance(label, dict): label = tuple(label.values()) return self._get_label(label) @staticmethod def _get_label(raw_label): """Converts a label by string or tuple to a string representation. Args: raw_label (str or tuple(any, any)): a unique identifier for the data group Returns: str: a label that is usable in charts """ # don't convert None type to string so we can test for it later if raw_label is None: return None if (isinstance(raw_label, tuple) or isinstance(raw_label, list)) and \ len(raw_label) == 1: raw_label = raw_label[0] elif isinstance(raw_label, dict): raw_label = label_from_index_dict(raw_label) return str(raw_label) def collect_attr_kwargs(self): if hasattr(super(self.__class__, self), 'default_attributes'): attrs = set(self.default_attributes.keys()) - set( (super(self.__class__, self).default_attributes or {}).keys()) else: attrs = set() return attrs def get_group_kwargs(self, group, attrs): return {attr: group[attr] for attr in attrs} def create(self, chart=None): """Builds the renderers, adding them and other components to the chart. Args: chart (:class:`Chart`, optional): the chart that will contain the glyph renderers that the `Builder` produces. 
Returns: :class:`Chart` """ # call methods that allow customized setup by subclasses self.setup() self.process_data() # create and add renderers to chart renderers = self.yield_renderers() if chart is None: chart = Chart() chart.add_renderers(self, renderers) # handle ranges after renders, since ranges depend on aggregations # ToDo: should reconsider where this occurs self.set_ranges() chart.add_ranges('x', self.x_range) chart.add_ranges('y', self.y_range) # sort the legend if we are told to self._legends = self._sort_legend( self.legend_sort_field, self.legend_sort_direction, self._legends, self.attributes) # always contribute legends, let Chart sort it out chart.add_legend(self._legends) chart.add_labels('x', self.xlabel) chart.add_labels('y', self.ylabel) chart.add_scales('x', self.xscale) chart.add_scales('y', self.yscale) if self.tooltips is not None: tooltips = build_hover_tooltips(hover_spec=self.tooltips, chart_cols=self.attribute_columns) chart.add_tooltips(tooltips) return chart @classmethod def generate_help(cls): help_str = '' for comp_glyph in cls.comp_glyph_types: help_str += str(comp_glyph.glyph_properties()) return help_str @staticmethod def _sort_legend(legend_sort_field, legend_sort_direction, legends, attributes): """Sort legends sorted by looping though sort_legend items ( see :attr:`Builder.sort_legend` for more details) """ if legend_sort_field: if len(attributes[legend_sort_field].columns) > 0: # TODO(fpliger): attributes should be consistent and not # need any type checking but for # the moment it is not, specially when going # though a process like binning or when data # is built for HeatMap, Scatter, etc... item_order = [x[0] if isinstance(x, tuple) else x for x in attributes[legend_sort_field].items] item_order = [str(x) if not isinstance(x, string_types) else x for x in item_order] def foo(leg): return item_order.index(leg[0]) reverse = legend_sort_direction == 'descending' return list(sorted(legends, key=foo, reverse=reverse)) return legends @property def sort_legend(self): warnings.warn("Chart property `sort_legend` was deprecated in 0.12 \ and will be removed in the future. Use `legend_sort_field` and \ `legend_sort_direction` instead.") return [(self.legend_sort_field, self.legend_sort_direction)] @sort_legend.setter def sort_legend(self, value): warnings.warn("Chart property 'sort_legend' was deprecated in 0.12 \ and will be removed in the future. 
Use `legend_sort_field` and \ `legend_sort_direction` instead.") self.legend_sort_field, direction = value[0] if direction: self.legend_sort_direction = "ascending" else: self.legend_sort_direction = "descending" class XYBuilder(Builder): """Implements common functionality for XY Builders.""" x = Dimension('x') y = Dimension('y') dimensions = ['x', 'y'] req_dimensions = [['x'], ['y'], ['x', 'y']] default_attributes = {'color': ColorAttr()} def set_ranges(self): """Calculate and set the x and y ranges.""" # ToDo: handle when only single dimension is provided extents = self.get_dim_extents() endx = extents['x_max'] startx = extents['x_min'] self.x_range = self._get_range('x', startx, endx) endy = extents['y_max'] starty = extents['y_min'] self.y_range = self._get_range('y', starty, endy) if self.xlabel is None: if self.x.selection is not None: select = self.x.selection if not isinstance(select, list): select = [select] else: select = [''] self.xlabel = ', '.join(select) if self.ylabel is None: if self.y.selection is not None: select = self.y.selection if not isinstance(select, list): select = [select] else: select = [''] self.ylabel = ', '.join(select) def _get_range(self, dim, start, end): """Create a :class:`Range` for the :class:`Chart`. Args: dim (str): the name of the dimension, which is an attribute of the builder start: the starting value of the range end: the ending value of the range Returns: :class:`Range` """ dim_ref = getattr(self, dim) values = dim_ref.data dtype = dim_ref.dtype.name sort = self.sort_dim.get(dim) # object data or single value if dtype == 'object': factors = values.drop_duplicates() if sort: # TODO (fpliger): this handles pandas API change so users do not experience # the related annoying deprecation warning. This is probably worth # removing when pandas deprecated version (0.16) is "old" enough try: factors.sort_values(inplace=True) except AttributeError: factors.sort(inplace=True) setattr(self, dim + 'scale', 'categorical') return FactorRange(factors=factors.tolist()) elif 'datetime' in dtype: setattr(self, dim + 'scale', 'datetime') return Range1d(start=start, end=end) else: if end == 'None' or (end - start) == 0: setattr(self, dim + 'scale', 'categorical') return FactorRange(factors=['None']) else: diff = end - start setattr(self, dim + 'scale', 'linear') return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff) class AggregateBuilder(Builder): """A base class for deriving specific builders performing aggregation with stats. The typical AggregateBuilder takes a single dimension of values. """ values = Dimension('values') default_attributes = {'label': CatAttr(), 'color': ColorAttr()}
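# -----------------------------------------------------------------------------
# Hedged sketch (not part of the module above): the Builder docstring describes
# a create() lifecycle of setup() -> process_data() -> yield_renderers() ->
# set_ranges().  The class below only illustrates which hooks a concrete
# builder overrides on top of XYBuilder (which already supplies set_ranges()).
# ``ExampleGlyph`` is a hypothetical CompositeGlyph subclass, and the
# ``self._data.groupby(**self.attributes)`` / ``group.get_values(...)`` calls
# mirror the pattern used by the built-in XY builders; treat both as
# assumptions rather than guaranteed API.
# -----------------------------------------------------------------------------
class ExamplePointBuilder(XYBuilder):
    """Hypothetical builder drawing one composite glyph per attribute group."""

    default_attributes = {'color': ColorAttr()}

    def process_data(self):
        # optional hook: global manipulations of self._data before grouping
        pass

    def yield_renderers(self):
        for group in self._data.groupby(**self.attributes):
            glyph = ExampleGlyph(x=group.get_values(self.x.selection),
                                 y=group.get_values(self.y.selection),
                                 color=group['color'])
            # add_glyph() records the composite glyph so that set_ranges() can
            # later compute extents from comp_glyphs, and handles the legend
            self.add_glyph(group, glyph)
            for renderer in glyph.renderers:
                yield renderer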
# Natural Language Toolkit: Shift-Reduce Parser # # Copyright (C) 2001-2011 NLTK Project # Author: Edward Loper <[email protected]> # Steven Bird <[email protected]> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT import string from nltk.grammar import Nonterminal, parse_cfg from nltk.tree import Tree from api import * ##////////////////////////////////////////////////////// ## Shift/Reduce Parser ##////////////////////////////////////////////////////// class ShiftReduceParser(ParserI): """ A simple bottom-up CFG parser that uses two operations, "shift" and "reduce", to find a single parse for a text. C{ShiftReduceParser} maintains a stack, which records the structure of a portion of the text. This stack is a list of C{String}s and C{Tree}s that collectively cover a portion of the text. For example, while parsing the sentence "the dog saw the man" with a typical grammar, C{ShiftReduceParser} will produce the following stack, which covers "the dog saw":: [(NP: (Det: 'the') (N: 'dog')), (V: 'saw')] C{ShiftReduceParser} attempts to extend the stack to cover the entire text, and to combine the stack elements into a single tree, producing a complete parse for the sentence. Initially, the stack is empty. It is extended to cover the text, from left to right, by repeatedly applying two operations: - X{shift} moves a token from the beginning of the text to the end of the stack. - X{reduce} uses a CFG production to combine the rightmost stack elements into a single C{Tree}. Often, more than one operation can be performed on a given stack. In this case, C{ShiftReduceParser} uses the following heuristics to decide which operation to perform: - Only shift if no reductions are available. - If multiple reductions are available, then apply the reduction whose CFG production is listed earliest in the grammar. Note that these heuristics are not guaranteed to choose an operation that leads to a parse of the text. Also, if multiple parses exists, C{ShiftReduceParser} will return at most one of them. @see: C{nltk.grammar} """ def __init__(self, grammar, trace=0): """ Create a new C{ShiftReduceParser}, that uses C{grammar} to parse texts. @type grammar: C{Grammar} @param grammar: The grammar used to parse texts. @type trace: C{int} @param trace: The level of tracing that should be used when parsing a text. C{0} will generate no tracing output; and higher numbers will produce more verbose tracing output. """ self._grammar = grammar self._trace = trace self._check_grammar() def grammar(self): return self._grammar def parse(self, tokens): tokens = list(tokens) self._grammar.check_coverage(tokens) # initialize the stack. stack = [] remaining_text = tokens # Trace output. if self._trace: print 'Parsing %r' % string.join(tokens) self._trace_stack(stack, remaining_text) # iterate through the text, pushing the token onto # the stack, then reducing the stack. while len(remaining_text) > 0: self._shift(stack, remaining_text) while self._reduce(stack, remaining_text): pass # Did we reduce everything? if len(stack) != 1: return None # Did we end up with the right category? if stack[0].node != self._grammar.start().symbol(): return None # We parsed successfully! return stack[0] def _shift(self, stack, remaining_text): """ Move a token from the beginning of C{remaining_text} to the end of C{stack}. @type stack: C{list} of C{String} and C{Tree} @param stack: A list of C{String}s and C{Tree}s, encoding the structure of the text that has been parsed so far. 
@type remaining_text: C{list} of C{String} @param remaining_text: The portion of the text that is not yet covered by C{stack}. @rtype: C{None} """ stack.append(remaining_text[0]) remaining_text.remove(remaining_text[0]) if self._trace: self._trace_shift(stack, remaining_text) def _match_rhs(self, rhs, rightmost_stack): """ @rtype: C{boolean} @return: true if the right hand side of a CFG production matches the rightmost elements of the stack. C{rhs} matches C{rightmost_stack} if they are the same length, and each element of C{rhs} matches the corresponding element of C{rightmost_stack}. A nonterminal element of C{rhs} matches any C{Tree} whose node value is equal to the nonterminal's symbol. A terminal element of C{rhs} matches any C{String} whose type is equal to the terminal. @type rhs: C{list} of (terminal and C{Nonterminal}) @param rhs: The right hand side of a CFG production. @type rightmost_stack: C{list} of (C{String} and C{Tree}) @param rightmost_stack: The rightmost elements of the parser's stack. """ if len(rightmost_stack) != len(rhs): return 0 for i in range(len(rightmost_stack)): if isinstance(rightmost_stack[i], Tree): if not isinstance(rhs[i], Nonterminal): return 0 if rightmost_stack[i].node != rhs[i].symbol(): return 0 else: if isinstance(rhs[i], Nonterminal): return 0 if rightmost_stack[i] != rhs[i]: return 0 return 1 def _reduce(self, stack, remaining_text, production=None): """ Find a CFG production whose right hand side matches the rightmost stack elements; and combine those stack elements into a single C{Tree}, with the node specified by the production's left-hand side. If more than one CFG production matches the stack, then use the production that is listed earliest in the grammar. The new C{Tree} replaces the elements in the stack. @rtype: C{Production} or C{None} @return: If a reduction is performed, then return the CFG production that the reduction is based on; otherwise, return false. @type stack: C{list} of C{String} and C{Tree} @param stack: A list of C{String}s and C{Tree}s, encoding the structure of the text that has been parsed so far. @type remaining_text: C{list} of C{String} @param remaining_text: The portion of the text that is not yet covered by C{stack}. """ if production is None: productions = self._grammar.productions() else: productions = [production] # Try each production, in order. for production in productions: rhslen = len(production.rhs()) # check if the RHS of a production matches the top of the stack if self._match_rhs(production.rhs(), stack[-rhslen:]): # combine the tree to reflect the reduction tree = Tree(production.lhs().symbol(), stack[-rhslen:]) stack[-rhslen:] = [tree] # We reduced something if self._trace: self._trace_reduce(stack, production, remaining_text) return production # We didn't reduce anything return None def trace(self, trace=2): """ Set the level of tracing output that should be generated when parsing a text. @type trace: C{int} @param trace: The trace level. A trace level of C{0} will generate no tracing output; and higher trace levels will produce more verbose tracing output. @rtype: C{None} """ # 1: just show shifts. # 2: show shifts & reduces # 3: display which tokens & productions are shifed/reduced self._trace = trace def _trace_stack(self, stack, remaining_text, marker=' '): """ Print trace output displaying the given stack and text. @rtype: C{None} @param marker: A character that is printed to the left of the stack. This is used with trace level 2 to print 'S' before shifted stacks and 'R' before reduced stacks. 
""" str = ' '+marker+' [ ' for elt in stack: if isinstance(elt, Tree): str += `Nonterminal(elt.node)` + ' ' else: str += `elt` + ' ' str += '* ' + string.join(remaining_text) + ']' print str def _trace_shift(self, stack, remaining_text): """ Print trace output displaying that a token has been shifted. @rtype: C{None} """ if self._trace > 2: print 'Shift %r:' % stack[-1] if self._trace == 2: self._trace_stack(stack, remaining_text, 'S') elif self._trace > 0: self._trace_stack(stack, remaining_text) def _trace_reduce(self, stack, production, remaining_text): """ Print trace output displaying that C{production} was used to reduce C{stack}. @rtype: C{None} """ if self._trace > 2: rhs = string.join(production.rhs()) print 'Reduce %r <- %s' % (production.lhs(), rhs) if self._trace == 2: self._trace_stack(stack, remaining_text, 'R') elif self._trace > 1: self._trace_stack(stack, remaining_text) def _check_grammar(self): """ Check to make sure that all of the CFG productions are potentially useful. If any productions can never be used, then print a warning. @rtype: C{None} """ productions = self._grammar.productions() # Any production whose RHS is an extension of another production's RHS # will never be used. for i in range(len(productions)): for j in range(i+1, len(productions)): rhs1 = productions[i].rhs() rhs2 = productions[j].rhs() if rhs1[:len(rhs2)] == rhs2: print 'Warning: %r will never be used' % productions[i] ##////////////////////////////////////////////////////// ## Stepping Shift/Reduce Parser ##////////////////////////////////////////////////////// class SteppingShiftReduceParser(ShiftReduceParser): """ A C{ShiftReduceParser} that allows you to setp through the parsing process, performing a single operation at a time. It also allows you to change the parser's grammar midway through parsing a text. The C{initialize} method is used to start parsing a text. C{shift} performs a single shift operation, and C{reduce} performs a single reduce operation. C{step} will perform a single reduce operation if possible; otherwise, it will perform a single shift operation. C{parses} returns the set of parses that have been found by the parser. @ivar _history: A list of C{(stack, remaining_text)} pairs, containing all of the previous states of the parser. This history is used to implement the C{undo} operation. @see: C{nltk.grammar} """ def __init__(self, grammar, trace=0): self._grammar = grammar self._trace = trace self._stack = None self._remaining_text = None self._history = [] def nbest_parse(self, tokens, n=None): tokens = list(tokens) self.initialize(tokens) while self.step(): pass return self.parses()[:n] def stack(self): """ @return: The parser's stack. @rtype: C{list} of C{String} and C{Tree} """ return self._stack def remaining_text(self): """ @return: The portion of the text that is not yet covered by the stack. @rtype: C{list} of C{String} """ return self._remaining_text def initialize(self, tokens): """ Start parsing a given text. This sets the parser's stack to C{[]} and sets its remaining text to C{tokens}. """ self._stack = [] self._remaining_text = tokens self._history = [] def step(self): """ Perform a single parsing operation. If a reduction is possible, then perform that reduction, and return the production that it is based on. Otherwise, if a shift is possible, then perform it, and return 1. Otherwise, return 0. @return: 0 if no operation was performed; 1 if a shift was performed; and the CFG production used to reduce if a reduction was performed. 
@rtype: C{Production} or C{boolean} """ return self.reduce() or self.shift() def shift(self): """ Move a token from the beginning of the remaining text to the end of the stack. If there are no more tokens in the remaining text, then do nothing. @return: True if the shift operation was successful. @rtype: C{boolean} """ if len(self._remaining_text) == 0: return 0 self._history.append( (self._stack[:], self._remaining_text[:]) ) self._shift(self._stack, self._remaining_text) return 1 def reduce(self, production=None): """ Use C{production} to combine the rightmost stack elements into a single C{Tree}. If C{production} does not match the rightmost stack elements, then do nothing. @return: The production used to reduce the stack, if a reduction was performed. If no reduction was performed, return C{None}. @rtype: C{Production} or C{None} """ self._history.append( (self._stack[:], self._remaining_text[:]) ) return_val = self._reduce(self._stack, self._remaining_text, production) if not return_val: self._history.pop() return return_val def undo(self): """ Return the parser to its state before the most recent shift or reduce operation. Calling C{undo} repeatedly return the parser to successively earlier states. If no shift or reduce operations have been performed, C{undo} will make no changes. @return: true if an operation was successfully undone. @rtype: C{boolean} """ if len(self._history) == 0: return 0 (self._stack, self._remaining_text) = self._history.pop() return 1 def reducible_productions(self): """ @return: A list of the productions for which reductions are available for the current parser state. @rtype: C{list} of C{Production} """ productions = [] for production in self._grammar.productions(): rhslen = len(production.rhs()) if self._match_rhs(production.rhs(), self._stack[-rhslen:]): productions.append(production) return productions def parses(self): """ @return: A list of the parses that have been found by this parser so far. @rtype: C{list} of C{Tree} """ if len(self._remaining_text) != 0: return [] if len(self._stack) != 1: return [] if self._stack[0].node != self._grammar.start().symbol(): return [] return self._stack # copied from nltk.parser def set_grammar(self, grammar): """ Change the grammar used to parse texts. @param grammar: The new grammar. @type grammar: C{CFG} """ self._grammar = grammar ##////////////////////////////////////////////////////// ## Demonstration Code ##////////////////////////////////////////////////////// def demo(): """ A demonstration of the shift-reduce parser. """ from nltk import parse, parse_cfg grammar = parse_cfg(""" S -> NP VP NP -> Det N | Det N PP VP -> V NP | V NP PP PP -> P NP NP -> 'I' N -> 'man' | 'park' | 'telescope' | 'dog' Det -> 'the' | 'a' P -> 'in' | 'with' V -> 'saw' """) sent = 'I saw a man in the park'.split() parser = parse.ShiftReduceParser(grammar, trace=2) for p in parser.nbest_parse(sent): print p if __name__ == '__main__': demo()
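# Hedged usage sketch (not part of the original NLTK module): the
# SteppingShiftReduceParser docstring above describes initialize/step/undo,
# but demo() only exercises ShiftReduceParser.  The function below shows the
# stepping interface on a small assumed grammar, following the Python 2
# idiom of the surrounding file.
def stepping_demo():
    grammar = parse_cfg("""
    S -> NP VP
    NP -> Det N
    VP -> V NP
    Det -> 'the'
    N -> 'dog' | 'man'
    V -> 'saw'
    """)
    sent = 'the dog saw the man'.split()
    parser = SteppingShiftReduceParser(grammar, trace=0)
    parser.initialize(sent)
    # perform one shift or reduce at a time until no operation applies
    while parser.step():
        print 'stack: %r  remaining: %r' % (parser.stack(),
                                            parser.remaining_text())
    # undo the final reduction, then redo it and collect the parse
    parser.undo()
    parser.step()
    for tree in parser.parses():
        print tree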
"""Test functions for the sparse.linalg._expm_multiply module """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_allclose, assert_, assert_equal, suppress_warnings) from scipy.sparse import SparseEfficiencyWarning import scipy.linalg from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max, _onenormest_matrix_power, expm_multiply, _expm_multiply_simple, _expm_multiply_interval) def less_than_or_close(a, b): return np.allclose(a, b) or (a < b) class TestExpmActionSimple(object): """ These tests do not consider the case of multiple time steps in one call. """ def test_theta_monotonicity(self): pairs = sorted(_theta.items()) for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]): assert_(theta_a < theta_b) def test_p_max_default(self): m_max = 55 expected_p_max = 8 observed_p_max = _compute_p_max(m_max) assert_equal(observed_p_max, expected_p_max) def test_p_max_range(self): for m_max in range(1, 55+1): p_max = _compute_p_max(m_max) assert_(p_max*(p_max - 1) <= m_max + 1) p_too_big = p_max + 1 assert_(p_too_big*(p_too_big - 1) > m_max + 1) def test_onenormest_matrix_power(self): np.random.seed(1234) n = 40 nsamples = 10 for i in range(nsamples): A = scipy.linalg.inv(np.random.randn(n, n)) for p in range(4): if not p: M = np.identity(n) else: M = np.dot(M, A) estimated = _onenormest_matrix_power(A, p) exact = np.linalg.norm(M, 1) assert_(less_than_or_close(estimated, exact)) assert_(less_than_or_close(exact, 3*estimated)) def test_expm_multiply(self): np.random.seed(1234) n = 40 k = 3 nsamples = 10 for i in range(nsamples): A = scipy.linalg.inv(np.random.randn(n, n)) B = np.random.randn(n, k) observed = expm_multiply(A, B) expected = np.dot(scipy.linalg.expm(A), B) assert_allclose(observed, expected) def test_matrix_vector_multiply(self): np.random.seed(1234) n = 40 nsamples = 10 for i in range(nsamples): A = scipy.linalg.inv(np.random.randn(n, n)) v = np.random.randn(n) observed = expm_multiply(A, v) expected = np.dot(scipy.linalg.expm(A), v) assert_allclose(observed, expected) def test_scaled_expm_multiply(self): np.random.seed(1234) n = 40 k = 3 nsamples = 10 for i in range(nsamples): for t in (0.2, 1.0, 1.5): with np.errstate(invalid='ignore'): A = scipy.linalg.inv(np.random.randn(n, n)) B = np.random.randn(n, k) observed = _expm_multiply_simple(A, B, t=t) expected = np.dot(scipy.linalg.expm(t*A), B) assert_allclose(observed, expected) def test_scaled_expm_multiply_single_timepoint(self): np.random.seed(1234) t = 0.1 n = 5 k = 2 A = np.random.randn(n, n) B = np.random.randn(n, k) observed = _expm_multiply_simple(A, B, t=t) expected = scipy.linalg.expm(t*A).dot(B) assert_allclose(observed, expected) def test_sparse_expm_multiply(self): np.random.seed(1234) n = 40 k = 3 nsamples = 10 for i in range(nsamples): A = scipy.sparse.rand(n, n, density=0.05) B = np.random.randn(n, k) observed = expm_multiply(A, B) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") sup.filter(SparseEfficiencyWarning, "spsolve is more efficient when sparse b is in the CSC matrix format") expected = scipy.linalg.expm(A).dot(B) assert_allclose(observed, expected) def test_complex(self): A = np.array([ [1j, 1j], [0, 1j]], dtype=complex) B = np.array([1j, 1j]) observed = expm_multiply(A, B) expected = np.array([ 1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)), 1j * np.exp(1j)], dtype=complex) assert_allclose(observed, expected) class TestExpmActionInterval(object): def 
test_sparse_expm_multiply_interval(self): np.random.seed(1234) start = 0.1 stop = 3.2 n = 40 k = 3 endpoint = True for num in (14, 13, 2): A = scipy.sparse.rand(n, n, density=0.05) B = np.random.randn(n, k) v = np.random.randn(n) for target in (B, v): X = expm_multiply(A, target, start=start, stop=stop, num=num, endpoint=endpoint) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") sup.filter(SparseEfficiencyWarning, "spsolve is more efficient when sparse b is in the CSC matrix format") for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(target)) def test_expm_multiply_interval_vector(self): np.random.seed(1234) start = 0.1 stop = 3.2 endpoint = True for num in (14, 13, 2): for n in (1, 2, 5, 20, 40): A = scipy.linalg.inv(np.random.randn(n, n)) v = np.random.randn(n) X = expm_multiply(A, v, start=start, stop=stop, num=num, endpoint=endpoint) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(v)) def test_expm_multiply_interval_matrix(self): np.random.seed(1234) start = 0.1 stop = 3.2 endpoint = True for num in (14, 13, 2): for n in (1, 2, 5, 20, 40): for k in (1, 2): A = scipy.linalg.inv(np.random.randn(n, n)) B = np.random.randn(n, k) X = expm_multiply(A, B, start=start, stop=stop, num=num, endpoint=endpoint) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(B)) def test_sparse_expm_multiply_interval_dtypes(self): # Test A & B int A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int) B = np.ones(5, dtype=int) Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr') assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) # Test A complex, B int A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex) B = np.ones(5, dtype=int) Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr') assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) # Test A int, B complex A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int) B = np.full(5, 1j, dtype=complex) Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr') assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) def test_expm_multiply_interval_status_0(self): self._help_test_specific_expm_interval_status(0) def test_expm_multiply_interval_status_1(self): self._help_test_specific_expm_interval_status(1) def test_expm_multiply_interval_status_2(self): self._help_test_specific_expm_interval_status(2) def _help_test_specific_expm_interval_status(self, target_status): np.random.seed(1234) start = 0.1 stop = 3.2 num = 13 endpoint = True n = 5 k = 2 nrepeats = 10 nsuccesses = 0 for num in [14, 13, 2] * nrepeats: A = np.random.randn(n, n) B = np.random.randn(n, k) status = _expm_multiply_interval(A, B, start=start, stop=stop, num=num, endpoint=endpoint, status_only=True) if status == target_status: X, status = _expm_multiply_interval(A, B, start=start, stop=stop, num=num, endpoint=endpoint, status_only=False) assert_equal(X.shape, (num, n, k)) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(B)) nsuccesses += 1 if not nsuccesses: msg = 'failed to find a status-' + str(target_status) + ' interval' raise 
Exception(msg)
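# Hedged illustration (not part of the test module): a minimal standalone
# check of the relationship the tests above verify, namely that
# expm_multiply(A, B) agrees with scipy.linalg.expm(A).dot(B) without forming
# expm(A) explicitly, and that the interval form returns one solution per
# sample of np.linspace(start, stop, num, endpoint=endpoint).  Sizes and the
# time interval are arbitrary choices for illustration.
def _example_expm_multiply():
    np.random.seed(0)
    n, k = 8, 2
    A = scipy.linalg.inv(np.random.randn(n, n))
    B = np.random.randn(n, k)
    # single time point (t = 1)
    assert_allclose(expm_multiply(A, B), scipy.linalg.expm(A).dot(B))
    # interval form: one result per sample point
    start, stop, num = 0.1, 3.2, 13
    X = expm_multiply(A, B, start=start, stop=stop, num=num, endpoint=True)
    samples = np.linspace(start=start, stop=stop, num=num, endpoint=True)
    for solution, t in zip(X, samples):
        assert_allclose(solution, scipy.linalg.expm(t * A).dot(B))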
# # # Copyright (C) 2013 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Utility functions for storage. """ import logging from ganeti import constants from ganeti import errors from ganeti.utils import io as utils_io from ganeti.utils import process as utils_process def GetDiskTemplatesOfStorageTypes(*storage_types): """Given the storage type, returns a list of disk templates based on that storage type.""" return [dt for dt in constants.DISK_TEMPLATES if constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[dt] in storage_types] def IsDiskTemplateEnabled(disk_template, enabled_disk_templates): """Checks if a particular disk template is enabled. """ return disk_template in enabled_disk_templates def IsFileStorageEnabled(enabled_disk_templates): """Checks if file storage is enabled. """ return IsDiskTemplateEnabled(constants.DT_FILE, enabled_disk_templates) def IsSharedFileStorageEnabled(enabled_disk_templates): """Checks if shared file storage is enabled. """ return IsDiskTemplateEnabled(constants.DT_SHARED_FILE, enabled_disk_templates) def IsLvmEnabled(enabled_disk_templates): """Check whether or not any lvm-based disk templates are enabled.""" return len(constants.DTS_LVM & set(enabled_disk_templates)) != 0 def LvmGetsEnabled(enabled_disk_templates, new_enabled_disk_templates): """Checks whether lvm was not enabled before, but will be enabled after the operation. """ if IsLvmEnabled(enabled_disk_templates): return False return len(constants.DTS_LVM & set(new_enabled_disk_templates)) != 0 def _GetDefaultStorageUnitForDiskTemplate(cfg, disk_template): """Retrieves the identifier of the default storage entity for the given storage type. 
@type cfg: C{objects.ConfigData} @param cfg: the configuration data @type disk_template: string @param disk_template: a disk template, for example 'drbd' @rtype: string @return: identifier for a storage unit, for example the vg_name for lvm storage """ storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template] cluster = cfg.GetClusterInfo() if disk_template in constants.DTS_LVM: return (storage_type, cfg.GetVGName()) elif disk_template == constants.DT_FILE: return (storage_type, cluster.file_storage_dir) elif disk_template == constants.DT_SHARED_FILE: return (storage_type, cluster.shared_file_storage_dir) elif disk_template == constants.DT_GLUSTER: return (storage_type, cluster.gluster_storage_dir) else: return (storage_type, None) def DiskTemplateSupportsSpaceReporting(disk_template): """Check whether the disk template supports storage space reporting.""" return (constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template] in constants.STS_REPORT) def GetStorageUnits(cfg, disk_templates): """Get the cluster's storage units for the given disk templates. If any lvm-based disk template is requested, spindle information is added to the request. @type cfg: L{config.ConfigWriter} @param cfg: Cluster configuration @type disk_templates: list of string @param disk_templates: list of disk templates for which the storage units will be computed @rtype: list of tuples (string, string) @return: list of storage units, each storage unit being a tuple of (storage_type, storage_key); storage_type is in C{constants.STORAGE_TYPES} and the storage_key a string to identify an entity of that storage type, for example a volume group name for LVM storage or a file for file storage. """ storage_units = [] for disk_template in disk_templates: if DiskTemplateSupportsSpaceReporting(disk_template): storage_units.append( _GetDefaultStorageUnitForDiskTemplate(cfg, disk_template)) return storage_units def LookupSpaceInfoByDiskTemplate(storage_space_info, disk_template): """Looks up the storage space info for a given disk template. @type storage_space_info: list of dicts @param storage_space_info: result of C{GetNodeInfo} @type disk_template: string @param disk_template: disk template to get storage space info @rtype: tuple @return: returns the element of storage_space_info that matches the given disk template """ storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template] return LookupSpaceInfoByStorageType(storage_space_info, storage_type) def LookupSpaceInfoByStorageType(storage_space_info, storage_type): """Looks up the storage space info for a given storage type. Note that this lookup can be ambiguous if storage space reporting for several units of the same storage type was requested. This function is only supposed to be used for legacy code in situations where it actually is unambiguous. 
@type storage_space_info: list of dicts @param storage_space_info: result of C{GetNodeInfo} @type storage_type: string @param storage_type: a storage type, which is included in the storage_units list @rtype: tuple @return: returns the element of storage_space_info that matches the given storage type """ result = None for unit_info in storage_space_info: if unit_info["type"] == storage_type: if result is None: result = unit_info else: # There is more than one storage type in the query, log a warning logging.warning("Storage space information requested for" " ambiguous storage type '%s'.", storage_type) return result def GetDiskLabels(prefix, num_disks, start=0): """Generate disk labels for a number of disks Note that disk labels are generated in the range [start..num_disks[ (e.g., as in range(start, num_disks)) @type prefix: string @param prefix: disk label prefix (e.g., "/dev/sd") @type num_disks: int @param num_disks: number of disks (i.e., disk labels) @type start: int @param start: optional start index @rtype: generator @return: generator for the disk labels """ def _GetDiskSuffix(i): n = ord('z') - ord('a') + 1 if i < n: return chr(ord('a') + i) else: mod = int(i % n) pref = _GetDiskSuffix((i - mod) / (n + 1)) suf = _GetDiskSuffix(mod) return pref + suf for i in range(start, num_disks): yield prefix + _GetDiskSuffix(i) def CreateBdevPartitionMapping(image_path): """Create dm device for each partition of disk image. This operation will allocate a loopback and a device-mapper device to map partitions. You must call L{ReleaseBdevPartitionMapping} to clean up resources allocated by this function call. @type image_path: string @param image_path: path of multi-partition disk image @rtype: tuple(string, list(string)) or NoneType @return: returns the tuple(loopback_device, list(device_mapper_files)) if image_path is a multi-partition disk image. otherwise, returns None. """ # Unfortunately, there are two different losetup commands in this world. # One has the '-s' switch and the other has the '--show' switch to provide the # same functionality. result = utils_process.RunCmd(["losetup", "-f", "-s", image_path]) if result.failed and "invalid option -- 's'" in result.stderr: result = utils_process.RunCmd(["losetup", "-f", "--show", image_path]) if result.failed: raise errors.CommandError("Failed to setup loop device for %s: %s" % (image_path, result.output)) loop_dev_path = result.stdout.strip() logging.debug("Loop dev %s allocated for %s", loop_dev_path, image_path) result = utils_process.RunCmd(["kpartx", "-a", "-v", loop_dev_path]) if result.failed: # Just try to cleanup allocated loop device utils_process.RunCmd(["losetup", "-d", loop_dev_path]) raise errors.CommandError("Failed to add partition mapping for %s: %s" % (image_path, result.output)) dm_devs = [x.split(" ") for x in result.stdout.split("\n") if x] if dm_devs: dm_dev_paths = [utils_io.PathJoin("/dev/mapper", x[2]) for x in dm_devs] return (loop_dev_path, dm_dev_paths) else: # image_path is not a multi partition disk image, no need to use # device-mapper. logging.debug("Release loop dev %s allocated for %s", loop_dev_path, image_path) ReleaseBdevPartitionMapping(loop_dev_path) return None def ReleaseBdevPartitionMapping(loop_dev_path): """Release allocated dm devices and loopback devices. 
@type loop_dev_path: string @param loop_dev_path: path of loopback device returned by L{CreateBdevPartitionMapping} """ result = utils_process.RunCmd(["kpartx", "-d", loop_dev_path]) if result.failed: raise errors.CommandError("Failed to release partition mapping of %s: %s" % (loop_dev_path, result.output)) result = utils_process.RunCmd(["losetup", "-d", loop_dev_path]) if result.failed: raise errors.CommandError("Failed to detach %s: %s" % (loop_dev_path, result.output))
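# Hedged illustration (not part of the original module): GetDiskLabels() above
# yields one label per index in range(start, num_disks), appending an
# alphabetic suffix to the prefix (continuing with two-letter suffixes beyond
# 'z').  The "/dev/sd" prefix and counts below are assumed example values.
def _ExampleDiskLabels():
  """Illustrative check of GetDiskLabels; values are assumptions only."""
  assert list(GetDiskLabels("/dev/sd", 4)) == \
      ["/dev/sda", "/dev/sdb", "/dev/sdc", "/dev/sdd"]
  # with a start offset, labels begin at the corresponding suffix
  assert list(GetDiskLabels("/dev/sd", 5, start=3)) == \
      ["/dev/sdd", "/dev/sde"]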
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Generic discrete convolution Description ----------- This is a manual (not optimized!) implementation of discrete 1D convolution intended for spectroscopy analysis. The difference with commonly used methods is the possibility to adapt the convolution kernel for each convolution point, e.g. change the FWHM of the Gaussian kernel as a function of the energy scale. Resources --------- .. [WPconv] <http://en.wikipedia.org/wiki/Convolution#Discrete_convolution> .. [Fisher] <http://homepages.inf.ed.ac.uk/rbf/HIPR2/convolve.htm> .. [GP1202] <http://glowingpython.blogspot.fr/2012/02/convolution-with-numpy.html> TODO ---- - [] get_ene_index: substitute with more elegant 'np.argmin(np.abs(ene-(x)))' - [] atan_gamma_fdmnes: define atan_gamma as in FDMNES """ import os import math import subprocess from optparse import OptionParser from datetime import date from string import Template import numpy as np MODNAME = "_math" DEBUG = 0 # No deps on Larch: used only if you want to access this as Larch plugin HAS_LARCH = False try: from larch import use_plugin_path use_plugin_path("math") HAS_LARCH = True except ImportError: pass # <LINESHAPES> # def gaussian(x, cen=0, sigma=1, fwhm=False, peak=None): """1 dimensional Gaussian function (https://en.wikipedia.org/wiki/Gaussian_function) Parameters ---------- x : array cen : [0] center, x0 sigma : [1] standard deviation, FWHM = 2*sqrt(2*ln(2)) * sigma =~ 2.35482 * sigma fwhm : [False] if True, the given sigma is assumed as fwhm and then converted accordingly peak : [None] if None, peak = 1 / math.sqrt(2*math.pi), the distribution integrate to 1 """ if fwhm is True: sigma = sigma / 2 * math.sqrt(2 * math.log(2)) if peak is None: peak = 1.0 / math.sqrt(2 * math.pi) return peak * np.exp(-((1.0 * x - cen) ** 2) / (2 * sigma ** 2)) def lorentzian(x, cen=0, gamma=1, peak=None): """1 dimensional Lorentzian Parameters ---------- x : array cen : [0] center, x0 gamma : [1] half width at half maximum peak : [None] if None, peak = 1 / (math.pi*sigma), the distribution integrate to 1 """ if peak is None: peak = 1.0 / (math.pi * gamma) return peak * (1.0 / (1.0 + ((1.0 * x - cen) / gamma) ** 2)) # </LINESHAPES> # def get_ene_index(ene, cen, hwhm): """ returns the min/max indexes for array ene at (cen-hwhm) and (cen+hwhm) very similar to index_of in larch """ try: if (cen - hwhm) <= min(ene): ene_imin = 0 else: ene_imin = max(np.where(ene < (cen - hwhm))[0]) if (cen + hwhm) >= max(ene): ene_imax = len(ene) - 1 else: ene_imax = min(np.where(ene > (cen + hwhm))[0]) return ene_imin, ene_imax except Exception: print("index not found for {0} +/- {1}".format(cen, hwhm)) return None, None def lin_gamma(ene, fwhm=1.0, linbroad=None): """returns constant or linear energy-dependent broadening Parameters ---------- ene : energy array in eV fwhm : first full width at half maximum in eV linbroad : list of 3-elements giving 'second full width at half maximum' 'energy starting point of the linear increase' 'energy ending point of the linear increase' """ w = np.ones_like(ene) if linbroad is None: return w * fwhm else: try: fwhm2 = linbroad[0] e1 = linbroad[1] e2 = linbroad[2] except Exception: raise ValueError("wrong format for linbroad") for en, ee in enumerate(ene): if ee < e1: w[en] *= fwhm elif ee <= e2: wlin = fwhm + (ee - e1) * (fwhm2 - fwhm) / (e2 - e1) w[en] *= wlin elif ee >= e2: w[en] *= fwhm2 return w def atan_gamma(ene, gamma_hole, gamma_max=15.0, e0=0, eslope=1.0): """returns arctangent-like broadening, $\Gamma(E)$ ..math 
\Gamma(E) = \Gamma_{hole} + \Gamma_{max} * ( \arctan( \frac{E-E_{0}}{E_{slope}} ) / \pi + 1/2) ) """ if eslope == 0: print("Warning: eslope cannot be zero, using default value of 1") eslope = 1.0 return gamma_hole + gamma_max * ((np.arctan((ene - e0) / eslope) / np.pi) + 0.5) def conv(e, mu, kernel="gaussian", fwhm_e=None, efermi=None): """ linear broadening Parameters ---------- e : x-axis (energy) mu : f(x) to convolve with g(x) kernel, mu(energy) kernel : convolution kernel, g(x) 'gaussian' 'lorentzian' fwhm_e: the full width half maximum in eV for the kernel broadening. It is an array of size 'e' with constants or an energy-dependent values determined by a function as 'lin_gamma()' or 'atan_gamma()' """ f = np.copy(mu) z = np.zeros_like(f) if efermi is not None: # ief = index_nearest(e, efermi) ief = np.argmin(np.abs(e - efermi)) f[0:ief] *= 0 if e.shape != fwhm_e.shape: print("Error: 'fwhm_e' does not have the same shape of 'e'") return 0 # linar fit upper part of the spectrum to avoid border effects # polyfit => pf lpf = int(len(e) / 2) cpf = np.polyfit(e[-lpf:], f[-lpf:], 1) fpf = np.poly1d(cpf) # extend upper energy border to 3*fhwm_e[-1] estep = e[-1] - e[-2] eup = np.append(e, np.arange(e[-1] + estep, e[-1] + 3 * fwhm_e[-1], estep)) for n in range(len(f)): # from now on I change e with eup eimin, eimax = get_ene_index(eup, eup[n], 1.5 * fwhm_e[n]) if (eimin is None) or (eimax is None): if DEBUG: raise IndexError("e[{0}]".format(n)) if len(range(eimin, eimax)) % 2 == 0: kx = eup[eimin:eimax + 1] # odd range centered at the convolution point else: kx = eup[eimin:eimax] # kernel ### hwhm = fwhm_e[n] / 2.0 if "gauss" in kernel.lower(): ky = gaussian(kx, cen=eup[n], sigma=hwhm) elif "lor" in kernel.lower(): ky = lorentzian(kx, cen=eup[n], gamma=hwhm) else: raise ValueError("convolution kernel '{0}' not implemented".format(kernel)) ky = ky / ky.sum() # normalize zn = 0 lk = int(len(kx)) for mf, mg in zip(range(-int(lk / 2), int(lk / 2) + 1), range(lk)): if ((n + mf) >= 0) and ((n + mf) < len(f)): zn += f[n + mf] * ky[mg] elif (n + mf) >= 0: zn += fpf(eup[n + mf]) * ky[mg] z[n] = zn return z def glinbroad(e, mu, fwhm_e=None, efermi=None, _larch=None): """gaussian linear convolution in Larch """ if _larch is None: raise Warning("larch broken?") return conv(e, mu, kernel="gaussian", fwhm_e=fwhm_e, efermi=efermi) glinbroad.__doc__ = conv.__doc__ # CONVOLUTION WITH FDMNES VIA SYSTEM CALL # class FdmnesConv(object): """ Performs convolution with FDMNES within Python """ def __init__(self, opts=None, calcroot=None, fn_in=None, fn_out=None): if opts is None: self.opts = dict( creator="FDMNES toolbox", today=date.today(), calcroot=calcroot, fn_in=fn_in, fn_out=fn_out, fn_ext="txt", estart_sel="", estart="-20.", efermi_sel="", efermi="-5.36", spin="", core_sel="!", core="!", hole_sel="", hole="0.5", conv_const="!", conv_sel="", ecent="25.0", elarg="20.0", gamma_max="10.0", gamma_type="Gamma_fix", gauss_sel="", gaussian="0.9", ) else: self.opts = opts if calcroot is not None: self.opts["calcroot"] = calcroot self.opts["fn_in"] = "{}.{}".format(calcroot, self.opts["fn_ext"]) self.opts["fn_out"] = "{}_conv{}.{}".format( calcroot, self.opts["spin"], self.opts["fn_ext"] ) if fn_in is not None: self.opts["calcroot"] = fn_in[:-4] self.opts["fn_in"] = fn_in self.opts["fn_out"] = "{}_conv{}.{}".format( fn_in[:-4], self.opts["spin"], self.opts["fn_ext"] ) if fn_out is not None: self.opts["fn_out"] = fn_out # then check all options self.checkopts() def checkopts(self): if (self.opts["calcroot"] is None) or 
(self.opts["fn_in"] is None): raise NameError("missing 'calcroot' or 'fn_in'") if self.opts["estart"] == "!": self.opts["estart_sel"] = "!" if self.opts["efermi"] == "!": self.opts["efermi_sel"] = "!" if self.opts["spin"] == "up": self.opts["core_sel"] = "" self.opts["core"] = "2 !spin up" elif self.opts["spin"] == "down": self.opts["core_sel"] = "" self.opts["core"] = "1 !spin down" elif self.opts["spin"] == "": self.opts["core_sel"] = "!" elif self.opts["spin"] == "both": raise NameError('spin="both" not implemented!') else: self.opts["spin"] = "" self.opts["core_sel"] = "!" self.opts["core"] = "!" if self.opts["hole"] == "!": self.opts["hole_sel"] = "!" if self.opts["conv_const"] == "!": self.opts["conv_sel"] = "!" else: self.opts["conv_sel"] = "" if self.opts["gamma_type"] == "Gamma_fix": pass elif self.opts["gamma_type"] == "Gamma_var": pass else: raise NameError('gamma_type="Gamma_fix"/"Gamma_var"') if self.opts["gaussian"] == "!": self.opts["gauss_sel"] = "!" else: self.opts["gauss_sel"] = "" # update the output file name self.opts["fn_out"] = "{}_conv{}.{}".format( self.opts["calcroot"], self.opts["spin"], self.opts["fn_ext"] ) def setopt(self, opt, value): self.opts[opt] = value self.checkopts() def wfdmfile(self): """ write a simple fdmfile.txt to enable the convolution first makes a copy of previous fdmfile.txt if not already done """ if os.path.exists("fdmfile.bak"): print("fdmfile.bak exists, good") else: subprocess.call("cp fdmfile.txt fdmfile.bak", shell=True) print("copied fdmfile.txt to fmdfile.bak") # s = Template( "!fdmfile.txt automatically created by ${creator} on ${today} (for convolution)\n\ !--------------------------------------------------------------------!\n\ ! Number of calculations\n\ 1\n\ ! FOR CONVOLUTION STEP\n\ convfile.txt\n\ !--------------------------------------------------------------------!\n\ " ) outstr = s.substitute(self.opts) f = open("fdmfile.txt", "w") f.write(outstr) f.close() def wconvfile(self): s = Template( """ !FDMNES convolution file\n\ !created by ${creator} on ${today}\n\ ! Calculation\n\ ${fn_in}\n\ Conv_out\n\ ${fn_out}\n\ ${estart_sel}Estart\n\ ${estart_sel}${estart}\n\ ${efermi_sel}Efermi\n\ ${efermi_sel}${efermi}\n\ ${core_sel}Selec_core\n\ ${core_sel}${core}\n\ ${hole_sel}Gamma_hole\n\ ${hole_sel}${hole}\n\ ${conv_sel}Convolution\n\ ${conv_sel}${ecent} ${elarg} ${gamma_max} !Ecent Elarg Gamma_max\n\ ${conv_sel}${gamma_type}\n\ ${gauss_sel}Gaussian\n\ ${gauss_sel}${gaussian} !Gaussian conv for experimental res\n\ """ ) outstr = s.substitute(self.opts) f = open("convfile.txt", "w") f.write(outstr) f.close() def run(self): """ runs fdmnes """ self.wfdmfile() # write fdmfile.txt self.wconvfile() # write convfile.txt try: subprocess.call("fdmnes", shell=True) except OSError: print("check 'fdmnes' executable exists!") # LARCH PLUGIN # def registerLarchPlugin(): return (MODNAME, {"glinbroad": glinbroad}) if __name__ == "__main__": # tests/examples in xraysloth/examples/convolution1D_tests.py pass
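# Hedged usage sketch (not part of the original module): conv() above expects
# an energy-dependent broadening array 'fwhm_e' with the same shape as the
# energy axis, typically built with lin_gamma() or atan_gamma().  The
# synthetic step-edge spectrum below is invented purely for illustration.
def _example_convolution():
    e = np.linspace(0.0, 100.0, 1001)      # energy axis, eV
    mu = np.zeros_like(e)
    mu[e > 20.0] = 1.0                     # toy absorption edge at 20 eV
    # constant 1 eV FWHM below 40 eV, increasing linearly to 4 eV at 80 eV
    fwhm_e = lin_gamma(e, fwhm=1.0, linbroad=[4.0, 40.0, 80.0])
    return conv(e, mu, kernel="gaussian", fwhm_e=fwhm_e, efermi=None)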
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file import os,shlex,sys,time from waflib import ConfigSet,Utils,Options,Logs,Context,Build,Errors try: from urllib import request except: from urllib import urlopen else: urlopen=request.urlopen BREAK='break' CONTINUE='continue' WAF_CONFIG_LOG='config.log' autoconfig=False conf_template='''# project %(app)s configured on %(now)s by # waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s) # using %(args)s #''' def download_check(node): pass def download_tool(tool,force=False,ctx=None): for x in Utils.to_list(Context.remote_repo): for sub in Utils.to_list(Context.remote_locs): url='/'.join((x,sub,tool+'.py')) try: web=urlopen(url) try: if web.getcode()!=200: continue except AttributeError: pass except Exception: continue else: tmp=ctx.root.make_node(os.sep.join((Context.waf_dir,'waflib','extras',tool+'.py'))) tmp.write(web.read()) Logs.warn('Downloaded %s from %s'%(tool,url)) download_check(tmp) try: module=Context.load_tool(tool) except: Logs.warn('The tool %s from %s is unusable'%(tool,url)) try: tmp.delete() except: pass continue return module raise Errors.WafError('Could not load the Waf tool') class ConfigurationContext(Context.Context): '''configures the project''' cmd='configure' error_handlers=[] def __init__(self,**kw): super(ConfigurationContext,self).__init__(**kw) self.environ=dict(os.environ) self.all_envs={} self.top_dir=None self.out_dir=None self.tools=[] self.hash=0 self.files=[] self.tool_cache=[] self.setenv('') def setenv(self,name,env=None): if not env: env=ConfigSet.ConfigSet() self.prepare_env(env) else: env=env.derive() self.all_envs[name]=env self.variant=name def get_env(self): return self.all_envs[self.variant] def set_env(self,val): self.all_envs[self.variant]=val env=property(get_env,set_env) def init_dirs(self): top=self.top_dir if not top: top=Options.options.top if not top: top=getattr(Context.g_module,Context.TOP,None) if not top: top=self.path.abspath() top=os.path.abspath(top) self.srcnode=(os.path.isabs(top)and self.root or self.path).find_dir(top) assert(self.srcnode) out=self.out_dir if not out: out=Options.options.out if not out: out=getattr(Context.g_module,Context.OUT,None) if not out: out=Options.lockfile.replace('.lock-waf','') self.bldnode=(os.path.isabs(out)and self.root or self.path).make_node(out) self.bldnode.mkdir() if not os.path.isdir(self.bldnode.abspath()): conf.fatal('could not create the build directory %s'%self.bldnode.abspath()) def execute(self): self.init_dirs() self.cachedir=self.bldnode.make_node(Build.CACHE_DIR) self.cachedir.mkdir() path=os.path.join(self.bldnode.abspath(),WAF_CONFIG_LOG) self.logger=Logs.make_logger(path,'cfg') app=getattr(Context.g_module,'APPNAME','') if app: ver=getattr(Context.g_module,'VERSION','') if ver: app="%s (%s)"%(app,ver) now=time.ctime() pyver=sys.hexversion systype=sys.platform args=" ".join(sys.argv) wafver=Context.WAFVERSION abi=Context.ABI self.to_log(conf_template%vars()) self.msg('Setting top to',self.srcnode.abspath()) self.msg('Setting out to',self.bldnode.abspath()) if id(self.srcnode)==id(self.bldnode): Logs.warn('Setting top == out (remember to use "update_outputs")') elif id(self.path)!=id(self.srcnode): if self.srcnode.is_child_of(self.path): Logs.warn('Are you certain that you do not want to set top="." 
?') super(ConfigurationContext,self).execute() self.store() Context.top_dir=self.srcnode.abspath() Context.out_dir=self.bldnode.abspath() env=ConfigSet.ConfigSet() env['argv']=sys.argv env['options']=Options.options.__dict__ env.run_dir=Context.run_dir env.top_dir=Context.top_dir env.out_dir=Context.out_dir env['hash']=self.hash env['files']=self.files env['environ']=dict(self.environ) if not self.env.NO_LOCK_IN_RUN: env.store(Context.run_dir+os.sep+Options.lockfile) if not self.env.NO_LOCK_IN_TOP: env.store(Context.top_dir+os.sep+Options.lockfile) if not self.env.NO_LOCK_IN_OUT: env.store(Context.out_dir+os.sep+Options.lockfile) def prepare_env(self,env): if not env.PREFIX: env.PREFIX=os.path.abspath(os.path.expanduser(Options.options.prefix)) if not env.BINDIR: env.BINDIR=Utils.subst_vars('${PREFIX}/bin',env) if not env.LIBDIR: env.LIBDIR=Utils.subst_vars('${PREFIX}/lib',env) def store(self): n=self.cachedir.make_node('build.config.py') n.write('version = 0x%x\ntools = %r\n'%(Context.HEXVERSION,self.tools)) if not self.all_envs: self.fatal('nothing to store in the configuration context!') for key in self.all_envs: tmpenv=self.all_envs[key] tmpenv.store(os.path.join(self.cachedir.abspath(),key+Build.CACHE_SUFFIX)) def load(self,input,tooldir=None,funs=None,download=True): tools=Utils.to_list(input) if tooldir:tooldir=Utils.to_list(tooldir) for tool in tools: mag=(tool,id(self.env),funs) if mag in self.tool_cache: self.to_log('(tool %s is already loaded, skipping)'%tool) continue self.tool_cache.append(mag) module=None try: module=Context.load_tool(tool,tooldir) except ImportError ,e: if Options.options.download: module=download_tool(tool,ctx=self) if not module: self.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s'%(tool,sys.path,e)) else: self.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s'%(tool,sys.path,e)) except Exception ,e: self.to_log('imp %r (%r & %r)'%(tool,tooldir,funs)) self.to_log(Utils.ex_stack()) raise if funs is not None: self.eval_rules(funs) else: func=getattr(module,'configure',None) if func: if type(func)is type(Utils.readf):func(self) else:self.eval_rules(func) self.tools.append({'tool':tool,'tooldir':tooldir,'funs':funs}) def post_recurse(self,node): super(ConfigurationContext,self).post_recurse(node) self.hash=hash((self.hash,node.read('rb'))) self.files.append(node.abspath()) def eval_rules(self,rules): self.rules=Utils.to_list(rules) for x in self.rules: f=getattr(self,x) if not f:self.fatal("No such method '%s'."%x) try: f() except Exception ,e: ret=self.err_handler(x,e) if ret==BREAK: break elif ret==CONTINUE: continue else: raise def err_handler(self,fun,error): pass def conf(f): def fun(*k,**kw): mandatory=True if'mandatory'in kw: mandatory=kw['mandatory'] del kw['mandatory'] try: return f(*k,**kw) except Errors.ConfigurationError ,e: if mandatory: raise e setattr(ConfigurationContext,f.__name__,fun) setattr(Build.BuildContext,f.__name__,fun) return f def add_os_flags(self,var,dest=None): try:self.env.append_value(dest or var,shlex.split(self.environ[var])) except KeyError:pass def cmd_to_list(self,cmd): if isinstance(cmd,str)and cmd.find(' '): try: os.stat(cmd) except OSError: return shlex.split(cmd) else: return[cmd] return cmd def check_waf_version(self,mini='1.6.0',maxi='1.7.0'): self.start_msg('Checking for waf version in %s-%s'%(str(mini),str(maxi))) ver=Context.HEXVERSION if Utils.num2ver(mini)>ver: self.fatal('waf version should be at least %r (%r 
found)'%(Utils.num2ver(mini),ver)) if Utils.num2ver(maxi)<ver: self.fatal('waf version should be at most %r (%r found)'%(Utils.num2ver(maxi),ver)) self.end_msg('ok') def find_file(self,filename,path_list=[]): for n in Utils.to_list(filename): for d in Utils.to_list(path_list): p=os.path.join(d,n) if os.path.exists(p): return p self.fatal('Could not find %r'%filename) def find_program(self,filename,**kw): exts=kw.get('exts',Utils.is_win32 and'.exe,.com,.bat,.cmd'or',.sh,.pl,.py') environ=kw.get('environ',os.environ) ret='' filename=Utils.to_list(filename) var=kw.get('var','') if not var: var=filename[0].upper() if self.env[var]: ret=self.env[var] elif var in environ: ret=environ[var] path_list=kw.get('path_list','') if not ret: if path_list: path_list=Utils.to_list(path_list) else: path_list=environ.get('PATH','').split(os.pathsep) if not isinstance(filename,list): filename=[filename] for a in exts.split(','): if ret: break for b in filename: if ret: break for c in path_list: if ret: break x=os.path.expanduser(os.path.join(c,b+a)) if os.path.isfile(x): ret=x if not ret and Utils.winreg: ret=Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER,filename) if not ret and Utils.winreg: ret=Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE,filename) self.msg('Checking for program '+','.join(filename),ret or False) self.to_log('find program=%r paths=%r var=%r -> %r'%(filename,path_list,var,ret)) if not ret: self.fatal(kw.get('errmsg','')or'Could not find the program %s'%','.join(filename)) if var: self.env[var]=ret return ret def find_perl_program(self,filename,path_list=[],var=None,environ=None,exts=''): try: app=self.find_program(filename,path_list=path_list,var=var,environ=environ,exts=exts) except: self.find_program('perl',var='PERL') app=self.find_file(filename,os.environ['PATH'].split(os.pathsep)) if not app: raise if var: self.env[var]=Utils.to_list(self.env['PERL'])+[app] self.msg('Checking for %r'%filename,app) conf(add_os_flags) conf(cmd_to_list) conf(check_waf_version) conf(find_file) conf(find_program) conf(find_perl_program)
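# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of waflib): a minimal project-level
# configure() showing how the helpers bound via conf(...) above become methods
# on the configuration context.  The tool name 'compiler_c', the program 'gcc'
# and the env variable 'CC_SAMPLE' are placeholders chosen for this example,
# not values required by this module.
def example_configure(conf):
    # bound by conf(check_waf_version) above
    conf.check_waf_version(mini='1.6.0', maxi='1.7.0')
    # ConfigurationContext.load() resolves a tool module, possibly via
    # download_tool() when the --download option is enabled
    conf.load('compiler_c')
    # bound by conf(find_program): searches PATH (and the registry on win32)
    # and stores the result in conf.env.CC_SAMPLE
    conf.find_program('gcc', var='CC_SAMPLE')
    # bound by conf(add_os_flags): append CFLAGS from the process environment
    conf.add_os_flags('CFLAGS')
# ---------------------------------------------------------------------------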
""" Build swig and f2py sources. """ from __future__ import division, absolute_import, print_function import os import re import sys import shlex import copy from distutils.command import build_ext from distutils.dep_util import newer_group, newer from distutils.util import get_platform from distutils.errors import DistutilsError, DistutilsSetupError # this import can't be done here, as it uses numpy stuff only available # after it's installed #import numpy.f2py from numpy.distutils import log from numpy.distutils.misc_util import ( fortran_ext_match, appendpath, is_string, is_sequence, get_cmd ) from numpy.distutils.from_template import process_file as process_f_file from numpy.distutils.conv_template import process_file as process_c_file def subst_vars(target, source, d): """Substitute any occurrence of @foo@ by d['foo'] from source file into target.""" var = re.compile('@([a-zA-Z_]+)@') with open(source, 'r') as fs: with open(target, 'w') as ft: for l in fs: m = var.search(l) if m: ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) else: ft.write(l) class build_src(build_ext.build_ext): description = "build sources from SWIG, F2PY files or a function" user_options = [ ('build-src=', 'd', "directory to \"build\" sources to"), ('f2py-opts=', None, "list of f2py command line options"), ('swig=', None, "path to the SWIG executable"), ('swig-opts=', None, "list of SWIG command line options"), ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('inplace', 'i', "ignore build-lib and put compiled extensions into the source " + "directory alongside your pure Python modules"), ] boolean_options = ['force', 'inplace'] help_options = [] def initialize_options(self): self.extensions = None self.package = None self.py_modules = None self.py_modules_dict = None self.build_src = None self.build_lib = None self.build_base = None self.force = None self.inplace = None self.package_dir = None self.f2pyflags = None # obsolete self.f2py_opts = None self.swigflags = None # obsolete self.swig_opts = None self.swig_cpp = None self.swig = None def finalize_options(self): self.set_undefined_options('build', ('build_base', 'build_base'), ('build_lib', 'build_lib'), ('force', 'force')) if self.package is None: self.package = self.distribution.ext_package self.extensions = self.distribution.ext_modules self.libraries = self.distribution.libraries or [] self.py_modules = self.distribution.py_modules or [] self.data_files = self.distribution.data_files or [] if self.build_src is None: plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) # py_modules_dict is used in build_py.find_package_modules self.py_modules_dict = {} if self.f2pyflags: if self.f2py_opts: log.warn('ignoring --f2pyflags as --f2py-opts already used') else: self.f2py_opts = self.f2pyflags self.f2pyflags = None if self.f2py_opts is None: self.f2py_opts = [] else: self.f2py_opts = shlex.split(self.f2py_opts) if self.swigflags: if self.swig_opts: log.warn('ignoring --swigflags as --swig-opts already used') else: self.swig_opts = self.swigflags self.swigflags = None if self.swig_opts is None: self.swig_opts = [] else: self.swig_opts = shlex.split(self.swig_opts) # use options from 
build_ext command build_ext = self.get_finalized_command('build_ext') if self.inplace is None: self.inplace = build_ext.inplace if self.swig_cpp is None: self.swig_cpp = build_ext.swig_cpp for c in ['swig', 'swig_opt']: o = '--'+c.replace('_', '-') v = getattr(build_ext, c, None) if v: if getattr(self, c): log.warn('both build_src and build_ext define %s option' % (o)) else: log.info('using "%s=%s" option from build_ext command' % (o, v)) setattr(self, c, v) def run(self): log.info("build_src") if not (self.extensions or self.libraries): return self.build_sources() def build_sources(self): if self.inplace: self.get_package_dir = \ self.get_finalized_command('build_py').get_package_dir self.build_py_modules_sources() for libname_info in self.libraries: self.build_library_sources(*libname_info) if self.extensions: self.check_extensions_list(self.extensions) for ext in self.extensions: self.build_extension_sources(ext) self.build_data_files_sources() self.build_npy_pkg_config() def build_data_files_sources(self): if not self.data_files: return log.info('building data_files sources') from numpy.distutils.misc_util import get_data_files new_data_files = [] for data in self.data_files: if isinstance(data, str): new_data_files.append(data) elif isinstance(data, tuple): d, files = data if self.inplace: build_dir = self.get_package_dir('.'.join(d.split(os.sep))) else: build_dir = os.path.join(self.build_src, d) funcs = [f for f in files if hasattr(f, '__call__')] files = [f for f in files if not hasattr(f, '__call__')] for f in funcs: if f.__code__.co_argcount==1: s = f(build_dir) else: s = f() if s is not None: if isinstance(s, list): files.extend(s) elif isinstance(s, str): files.append(s) else: raise TypeError(repr(s)) filenames = get_data_files((d, files)) new_data_files.append((d, filenames)) else: raise TypeError(repr(data)) self.data_files[:] = new_data_files def _build_npy_pkg_config(self, info, gd): template, install_dir, subst_dict = info template_dir = os.path.dirname(template) for k, v in gd.items(): subst_dict[k] = v if self.inplace == 1: generated_dir = os.path.join(template_dir, install_dir) else: generated_dir = os.path.join(self.build_src, template_dir, install_dir) generated = os.path.basename(os.path.splitext(template)[0]) generated_path = os.path.join(generated_dir, generated) if not os.path.exists(generated_dir): os.makedirs(generated_dir) subst_vars(generated_path, template, subst_dict) # Where to install relatively to install prefix full_install_dir = os.path.join(template_dir, install_dir) return full_install_dir, generated_path def build_npy_pkg_config(self): log.info('build_src: building npy-pkg config files') # XXX: another ugly workaround to circumvent distutils brain damage. We # need the install prefix here, but finalizing the options of the # install command when only building sources cause error. Instead, we # copy the install command instance, and finalize the copy so that it # does not disrupt how distutils want to do things when with the # original install command instance. install_cmd = copy.copy(get_cmd('install')) if not install_cmd.finalized == 1: install_cmd.finalize_options() build_npkg = False if self.inplace == 1: top_prefix = '.' 
build_npkg = True elif hasattr(install_cmd, 'install_libbase'): top_prefix = install_cmd.install_libbase build_npkg = True if build_npkg: for pkg, infos in self.distribution.installed_pkg_config.items(): pkg_path = self.distribution.package_dir[pkg] prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) d = {'prefix': prefix} for info in infos: install_dir, generated = self._build_npy_pkg_config(info, d) self.distribution.data_files.append((install_dir, [generated])) def build_py_modules_sources(self): if not self.py_modules: return log.info('building py_modules sources') new_py_modules = [] for source in self.py_modules: if is_sequence(source) and len(source)==3: package, module_base, source = source if self.inplace: build_dir = self.get_package_dir(package) else: build_dir = os.path.join(self.build_src, os.path.join(*package.split('.'))) if hasattr(source, '__call__'): target = os.path.join(build_dir, module_base + '.py') source = source(target) if source is None: continue modules = [(package, module_base, source)] if package not in self.py_modules_dict: self.py_modules_dict[package] = [] self.py_modules_dict[package] += modules else: new_py_modules.append(source) self.py_modules[:] = new_py_modules def build_library_sources(self, lib_name, build_info): sources = list(build_info.get('sources', [])) if not sources: return log.info('building library "%s" sources' % (lib_name)) sources = self.generate_sources(sources, (lib_name, build_info)) sources = self.template_sources(sources, (lib_name, build_info)) sources, h_files = self.filter_h_files(sources) if h_files: log.info('%s - nothing done with h_files = %s', self.package, h_files) #for f in h_files: # self.distribution.headers.append((lib_name,f)) build_info['sources'] = sources return def build_extension_sources(self, ext): sources = list(ext.sources) log.info('building extension "%s" sources' % (ext.name)) fullname = self.get_ext_fullname(ext.name) modpath = fullname.split('.') package = '.'.join(modpath[0:-1]) if self.inplace: self.ext_target_dir = self.get_package_dir(package) sources = self.generate_sources(sources, ext) sources = self.template_sources(sources, ext) sources = self.swig_sources(sources, ext) sources = self.f2py_sources(sources, ext) sources = self.pyrex_sources(sources, ext) sources, py_files = self.filter_py_files(sources) if package not in self.py_modules_dict: self.py_modules_dict[package] = [] modules = [] for f in py_files: module = os.path.splitext(os.path.basename(f))[0] modules.append((package, module, f)) self.py_modules_dict[package] += modules sources, h_files = self.filter_h_files(sources) if h_files: log.info('%s - nothing done with h_files = %s', package, h_files) #for f in h_files: # self.distribution.headers.append((package,f)) ext.sources = sources def generate_sources(self, sources, extension): new_sources = [] func_sources = [] for source in sources: if is_string(source): new_sources.append(source) else: func_sources.append(source) if not func_sources: return new_sources if self.inplace and not is_sequence(extension): build_dir = self.ext_target_dir else: if is_sequence(extension): name = extension[0] # if 'include_dirs' not in extension[1]: # extension[1]['include_dirs'] = [] # incl_dirs = extension[1]['include_dirs'] else: name = extension.name # incl_dirs = extension.include_dirs #if self.build_src not in incl_dirs: # incl_dirs.append(self.build_src) build_dir = os.path.join(*([self.build_src] +name.split('.')[:-1])) self.mkpath(build_dir) for func in func_sources: source = func(extension, 
build_dir) if not source: continue if is_sequence(source): [log.info(" adding '%s' to sources." % (s,)) for s in source] new_sources.extend(source) else: log.info(" adding '%s' to sources." % (source,)) new_sources.append(source) return new_sources def filter_py_files(self, sources): return self.filter_files(sources, ['.py']) def filter_h_files(self, sources): return self.filter_files(sources, ['.h', '.hpp', '.inc']) def filter_files(self, sources, exts = []): new_sources = [] files = [] for source in sources: (base, ext) = os.path.splitext(source) if ext in exts: files.append(source) else: new_sources.append(source) return new_sources, files def template_sources(self, sources, extension): new_sources = [] if is_sequence(extension): depends = extension[1].get('depends') include_dirs = extension[1].get('include_dirs') else: depends = extension.depends include_dirs = extension.include_dirs for source in sources: (base, ext) = os.path.splitext(source) if ext == '.src': # Template file if self.inplace: target_dir = os.path.dirname(base) else: target_dir = appendpath(self.build_src, os.path.dirname(base)) self.mkpath(target_dir) target_file = os.path.join(target_dir, os.path.basename(base)) if (self.force or newer_group([source] + depends, target_file)): if _f_pyf_ext_match(base): log.info("from_template:> %s" % (target_file)) outstr = process_f_file(source) else: log.info("conv_template:> %s" % (target_file)) outstr = process_c_file(source) with open(target_file, 'w') as fid: fid.write(outstr) if _header_ext_match(target_file): d = os.path.dirname(target_file) if d not in include_dirs: log.info(" adding '%s' to include_dirs." % (d)) include_dirs.append(d) new_sources.append(target_file) else: new_sources.append(source) return new_sources def pyrex_sources(self, sources, extension): """Pyrex not supported; this remains for Cython support (see below)""" new_sources = [] ext_name = extension.name.split('.')[-1] for source in sources: (base, ext) = os.path.splitext(source) if ext == '.pyx': target_file = self.generate_a_pyrex_source(base, ext_name, source, extension) new_sources.append(target_file) else: new_sources.append(source) return new_sources def generate_a_pyrex_source(self, base, ext_name, source, extension): """Pyrex is not supported, but some projects monkeypatch this method. That allows compiling Cython code, see gh-6955. This method will remain here for compatibility reasons. """ return [] def f2py_sources(self, sources, extension): new_sources = [] f2py_sources = [] f_sources = [] f2py_targets = {} target_dirs = [] ext_name = extension.name.split('.')[-1] skip_f2py = 0 for source in sources: (base, ext) = os.path.splitext(source) if ext == '.pyf': # F2PY interface file if self.inplace: target_dir = os.path.dirname(base) else: target_dir = appendpath(self.build_src, os.path.dirname(base)) if os.path.isfile(source): name = get_f2py_modulename(source) if name != ext_name: raise DistutilsSetupError('mismatch of extension names: %s ' 'provides %r but expected %r' % ( source, name, ext_name)) target_file = os.path.join(target_dir, name+'module.c') else: log.debug(' source %s does not exist: skipping f2py\'ing.' \ % (source)) name = ext_name skip_f2py = 1 target_file = os.path.join(target_dir, name+'module.c') if not os.path.isfile(target_file): log.warn(' target %s does not exist:\n '\ 'Assuming %smodule.c was generated with '\ '"build_src --inplace" command.' 
\ % (target_file, name)) target_dir = os.path.dirname(base) target_file = os.path.join(target_dir, name+'module.c') if not os.path.isfile(target_file): raise DistutilsSetupError("%r missing" % (target_file,)) log.info(' Yes! Using %r as up-to-date target.' \ % (target_file)) target_dirs.append(target_dir) f2py_sources.append(source) f2py_targets[source] = target_file new_sources.append(target_file) elif fortran_ext_match(ext): f_sources.append(source) else: new_sources.append(source) if not (f2py_sources or f_sources): return new_sources for d in target_dirs: self.mkpath(d) f2py_options = extension.f2py_options + self.f2py_opts if self.distribution.libraries: for name, build_info in self.distribution.libraries: if name in extension.libraries: f2py_options.extend(build_info.get('f2py_options', [])) log.info("f2py options: %s" % (f2py_options)) if f2py_sources: if len(f2py_sources) != 1: raise DistutilsSetupError( 'only one .pyf file is allowed per extension module but got'\ ' more: %r' % (f2py_sources,)) source = f2py_sources[0] target_file = f2py_targets[source] target_dir = os.path.dirname(target_file) or '.' depends = [source] + extension.depends if (self.force or newer_group(depends, target_file, 'newer')) \ and not skip_f2py: log.info("f2py: %s" % (source)) import numpy.f2py numpy.f2py.run_main(f2py_options + ['--build-dir', target_dir, source]) else: log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) else: #XXX TODO: --inplace support for sdist command if is_sequence(extension): name = extension[0] else: name = extension.name target_dir = os.path.join(*([self.build_src] +name.split('.')[:-1])) target_file = os.path.join(target_dir, ext_name + 'module.c') new_sources.append(target_file) depends = f_sources + extension.depends if (self.force or newer_group(depends, target_file, 'newer')) \ and not skip_f2py: log.info("f2py:> %s" % (target_file)) self.mkpath(target_dir) import numpy.f2py numpy.f2py.run_main(f2py_options + ['--lower', '--build-dir', target_dir]+\ ['-m', ext_name]+f_sources) else: log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ % (target_file)) if not os.path.isfile(target_file): raise DistutilsError("f2py target file %r not generated" % (target_file,)) build_dir = os.path.join(self.build_src, target_dir) target_c = os.path.join(build_dir, 'fortranobject.c') target_h = os.path.join(build_dir, 'fortranobject.h') log.info(" adding '%s' to sources." % (target_c)) new_sources.append(target_c) if build_dir not in extension.include_dirs: log.info(" adding '%s' to include_dirs." % (build_dir)) extension.include_dirs.append(build_dir) if not skip_f2py: import numpy.f2py d = os.path.dirname(numpy.f2py.__file__) source_c = os.path.join(d, 'src', 'fortranobject.c') source_h = os.path.join(d, 'src', 'fortranobject.h') if newer(source_c, target_c) or newer(source_h, target_h): self.mkpath(os.path.dirname(target_c)) self.copy_file(source_c, target_c) self.copy_file(source_h, target_h) else: if not os.path.isfile(target_c): raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) if not os.path.isfile(target_h): raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: filename = os.path.join(target_dir, ext_name + name_ext) if os.path.isfile(filename): log.info(" adding '%s' to sources." % (filename)) f_sources.append(filename) return new_sources + f_sources def swig_sources(self, sources, extension): # Assuming SWIG 1.3.14 or later. 
See compatibility note in # http://www.swig.org/Doc1.3/Python.html#Python_nn6 new_sources = [] swig_sources = [] swig_targets = {} target_dirs = [] py_files = [] # swig generated .py files target_ext = '.c' if '-c++' in extension.swig_opts: typ = 'c++' is_cpp = True extension.swig_opts.remove('-c++') elif self.swig_cpp: typ = 'c++' is_cpp = True else: typ = None is_cpp = False skip_swig = 0 ext_name = extension.name.split('.')[-1] for source in sources: (base, ext) = os.path.splitext(source) if ext == '.i': # SWIG interface file # the code below assumes that the sources list # contains not more than one .i SWIG interface file if self.inplace: target_dir = os.path.dirname(base) py_target_dir = self.ext_target_dir else: target_dir = appendpath(self.build_src, os.path.dirname(base)) py_target_dir = target_dir if os.path.isfile(source): name = get_swig_modulename(source) if name != ext_name[1:]: raise DistutilsSetupError( 'mismatch of extension names: %s provides %r' ' but expected %r' % (source, name, ext_name[1:])) if typ is None: typ = get_swig_target(source) is_cpp = typ=='c++' else: typ2 = get_swig_target(source) if typ2 is None: log.warn('source %r does not define swig target, assuming %s swig target' \ % (source, typ)) elif typ!=typ2: log.warn('expected %r but source %r defines %r swig target' \ % (typ, source, typ2)) if typ2=='c++': log.warn('resetting swig target to c++ (some targets may have .c extension)') is_cpp = True else: log.warn('assuming that %r has c++ swig target' % (source)) if is_cpp: target_ext = '.cpp' target_file = os.path.join(target_dir, '%s_wrap%s' \ % (name, target_ext)) else: log.warn(' source %s does not exist: skipping swig\'ing.' \ % (source)) name = ext_name[1:] skip_swig = 1 target_file = _find_swig_target(target_dir, name) if not os.path.isfile(target_file): log.warn(' target %s does not exist:\n '\ 'Assuming %s_wrap.{c,cpp} was generated with '\ '"build_src --inplace" command.' \ % (target_file, name)) target_dir = os.path.dirname(base) target_file = _find_swig_target(target_dir, name) if not os.path.isfile(target_file): raise DistutilsSetupError("%r missing" % (target_file,)) log.warn(' Yes! Using %r as up-to-date target.' 
\ % (target_file)) target_dirs.append(target_dir) new_sources.append(target_file) py_files.append(os.path.join(py_target_dir, name+'.py')) swig_sources.append(source) swig_targets[source] = new_sources[-1] else: new_sources.append(source) if not swig_sources: return new_sources if skip_swig: return new_sources + py_files for d in target_dirs: self.mkpath(d) swig = self.swig or self.find_swig() swig_cmd = [swig, "-python"] + extension.swig_opts if is_cpp: swig_cmd.append('-c++') for d in extension.include_dirs: swig_cmd.append('-I'+d) for source in swig_sources: target = swig_targets[source] depends = [source] + extension.depends if self.force or newer_group(depends, target, 'newer'): log.info("%s: %s" % (os.path.basename(swig) \ + (is_cpp and '++' or ''), source)) self.spawn(swig_cmd + self.swig_opts \ + ["-o", target, '-outdir', py_target_dir, source]) else: log.debug(" skipping '%s' swig interface (up-to-date)" \ % (source)) return new_sources + py_files _f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match _header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match #### SWIG related auxiliary functions #### _swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)', re.I).match _has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search _has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search def get_swig_target(source): with open(source, 'r') as f: result = None line = f.readline() if _has_cpp_header(line): result = 'c++' if _has_c_header(line): result = 'c' return result def get_swig_modulename(source): with open(source, 'r') as f: name = None for line in f: m = _swig_module_name_match(line) if m: name = m.group('name') break return name def _find_swig_target(target_dir, name): for ext in ['.cpp', '.c']: target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) if os.path.isfile(target): break return target #### F2PY related auxiliary functions #### _f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)', re.I).match _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?' r'__user__[\w_]*)', re.I).match def get_f2py_modulename(source): name = None with open(source) as f: for line in f: m = _f2py_module_name_match(line) if m: if _f2py_user_module_name_match(line): # skip *__user__* names continue name = m.group('name') break return name ##########################################
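# Illustrative sketch only (not part of numpy.distutils): a small, self-contained
# exercise of the helpers defined above, using throw-away temporary files.  It
# assumes nothing beyond the standard library and the functions in this module,
# and is never invoked automatically.
def _demo_build_src_helpers():
    import tempfile

    # subst_vars(): copy a template, replacing @name@ placeholders from a dict.
    src = tempfile.NamedTemporaryFile('w', suffix='.ini.in', delete=False)
    src.write('pkg_version = @version@\n')
    src.close()
    target = src.name[:-3]  # drop the trailing '.in'
    subst_vars(target, src.name, {'version': '1.2.3'})
    with open(target) as f:
        assert f.read() == 'pkg_version = 1.2.3\n'

    # get_f2py_modulename(): extract the module name from a .pyf interface file.
    pyf = tempfile.NamedTemporaryFile('w', suffix='.pyf', delete=False)
    pyf.write('python module example_mod\nend python module example_mod\n')
    pyf.close()
    assert get_f2py_modulename(pyf.name) == 'example_mod'

    # get_swig_modulename(): same idea for a SWIG .i interface file.
    swig_i = tempfile.NamedTemporaryFile('w', suffix='.i', delete=False)
    swig_i.write('%module example\n')
    swig_i.close()
    assert get_swig_modulename(swig_i.name) == 'example'

    for path in (src.name, target, pyf.name, swig_i.name):
        os.remove(path)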
import http.server import multiprocessing import os import re import socket import ssl import sys import ttfw_idf from tiny_test_fw import DUT, Utility server_cert = '-----BEGIN CERTIFICATE-----\n' \ 'MIIDWDCCAkACCQCbF4+gVh/MLjANBgkqhkiG9w0BAQsFADBuMQswCQYDVQQGEwJJ\n'\ 'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\ 'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\ 'b20wHhcNMjEwNzEyMTIzNjI3WhcNNDEwNzA3MTIzNjI3WjBuMQswCQYDVQQGEwJJ\n'\ 'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\ 'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\ 'b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhxF/y7bygndxPwiWL\n'\ 'SwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQuc32W\n'\ 'ukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2mKRbQ\n'\ 'S5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO2fEz\n'\ 'YaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnvL6Oz\n'\ '3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdOAoap\n'\ 'rFTRAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAItw24y565k3C/zENZlxyzto44ud\n'\ 'IYPQXN8Fa2pBlLe1zlSIyuaA/rWQ+i1daS8nPotkCbWZyf5N8DYaTE4B0OfvoUPk\n'\ 'B5uGDmbuk6akvlB5BGiYLfQjWHRsK9/4xjtIqN1H58yf3QNROuKsPAeywWS3Fn32\n'\ '3//OpbWaClQePx6udRYMqAitKR+QxL7/BKZQsX+UyShuq8hjphvXvk0BW8ONzuw9\n'\ 'RcoORxM0FzySYjeQvm4LhzC/P3ZBhEq0xs55aL2a76SJhq5hJy7T/Xz6NFByvlrN\n'\ 'lFJJey33KFrAf5vnV9qcyWFIo7PYy2VsaaEjFeefr7q3sTFSMlJeadexW2Y=\n'\ '-----END CERTIFICATE-----\n' server_key = '-----BEGIN PRIVATE KEY-----\n'\ 'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhxF/y7bygndxP\n'\ 'wiWLSwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQu\n'\ 'c32WukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2m\n'\ 'KRbQS5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO\n'\ '2fEzYaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnv\n'\ 'L6Oz3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdO\n'\ 'AoaprFTRAgMBAAECggEAE0HCxV/N1Q1h+1OeDDGL5+74yjKSFKyb/vTVcaPCrmaH\n'\ 'fPvp0ddOvMZJ4FDMAsiQS6/n4gQ7EKKEnYmwTqj4eUYW8yxGUn3f0YbPHbZT+Mkj\n'\ 'z5woi3nMKi/MxCGDQZX4Ow3xUQlITUqibsfWcFHis8c4mTqdh4qj7xJzehD2PVYF\n'\ 'gNHZsvVj6MltjBDAVwV1IlGoHjuElm6vuzkfX7phxcA1B4ZqdYY17yCXUnvui46z\n'\ 'Xn2kUTOOUCEgfgvGa9E+l4OtdXi5IxjaSraU+dlg2KsE4TpCuN2MEVkeR5Ms3Y7Q\n'\ 'jgJl8vlNFJDQpbFukLcYwG7rO5N5dQ6WWfVia/5XgQKBgQD74at/bXAPrh9NxPmz\n'\ 'i1oqCHMDoM9sz8xIMZLF9YVu3Jf8ux4xVpRSnNy5RU1gl7ZXbpdgeIQ4v04zy5aw\n'\ '8T4tu9K3XnR3UXOy25AK0q+cnnxZg3kFQm+PhtOCKEFjPHrgo2MUfnj+EDddod7N\n'\ 'JQr9q5rEFbqHupFPpWlqCa3QmQKBgQDldWUGokNaEpmgHDMnHxiibXV5LQhzf8Rq\n'\ 'gJIQXb7R9EsTSXEvsDyqTBb7PHp2Ko7rZ5YQfyf8OogGGjGElnPoU/a+Jij1gVFv\n'\ 'kZ064uXAAISBkwHdcuobqc5EbG3ceyH46F+FBFhqM8KcbxJxx08objmh58+83InN\n'\ 'P9Qr25Xw+QKBgEGXMHuMWgQbSZeM1aFFhoMvlBO7yogBTKb4Ecpu9wI5e3Kan3Al\n'\ 'pZYltuyf+VhP6XG3IMBEYdoNJyYhu+nzyEdMg8CwXg+8LC7FMis/Ve+o7aS5scgG\n'\ '1to/N9DK/swCsdTRdzmc/ZDbVC+TuVsebFBGYZTyO5KgqLpezqaIQrTxAoGALFCU\n'\ '10glO9MVyl9H3clap5v+MQ3qcOv/EhaMnw6L2N6WVT481tnxjW4ujgzrFcE4YuxZ\n'\ 'hgwYu9TOCmeqopGwBvGYWLbj+C4mfSahOAs0FfXDoYazuIIGBpuv03UhbpB1Si4O\n'\ 'rJDfRnuCnVWyOTkl54gKJ2OusinhjztBjcrV1XkCgYEA3qNi4uBsPdyz9BZGb/3G\n'\ 'rOMSw0CaT4pEMTLZqURmDP/0hxvTk1polP7O/FYwxVuJnBb6mzDa0xpLFPTpIAnJ\n'\ 'YXB8xpXU69QVh+EBbemdJWOd+zp5UCfXvb2shAeG3Tn/Dz4cBBMEUutbzP+or0nG\n'\ 'vSXnRLaxQhooWm+IuX9SuBQ=\n'\ '-----END PRIVATE KEY-----\n' def get_my_ip(): s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s1.connect(('8.8.8.8', 80)) my_ip = s1.getsockname()[0] s1.close() return my_ip def get_server_status(host_ip, server_port): sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_status = sock.connect_ex((host_ip, server_port))
    sock.close()
    if server_status == 0:
        return True
    return False


def start_https_server(ota_image_dir, server_ip, server_port, server_file=None, key_file=None):
    os.chdir(ota_image_dir)

    # if no certificate/key is supplied, write the ones embedded above
    if server_file is None:
        server_file = os.path.join(ota_image_dir, 'server_cert.pem')
        cert_file_handle = open(server_file, 'w+')
        cert_file_handle.write(server_cert)
        cert_file_handle.close()

    if key_file is None:
        key_file = os.path.join(ota_image_dir, 'server_key.pem')
        key_file_handle = open(key_file, 'w+')
        key_file_handle.write(server_key)
        key_file_handle.close()

    httpd = http.server.HTTPServer((server_ip, server_port), http.server.SimpleHTTPRequestHandler)
    httpd.socket = ssl.wrap_socket(httpd.socket, keyfile=key_file, certfile=server_file, server_side=True)
    httpd.serve_forever()


def check_sha256(sha256_expected, sha256_reported):
    Utility.console_log('sha256_expected: %s' % (sha256_expected))
    Utility.console_log('sha256_reported: %s' % (sha256_reported))
    if sha256_reported not in sha256_expected:
        raise ValueError('SHA256 mismatch')
    else:
        Utility.console_log('SHA256 expected and reported are the same')


def calc_all_sha256(dut):
    bootloader_path = os.path.join(dut.app.binary_path, 'bootloader', 'bootloader.bin')
    output = dut.image_info(bootloader_path)
    sha256_bootloader = re.search(r'Validation Hash:\s+([a-f0-9]+)', output).group(1)
    Utility.console_log('bootloader SHA256: %s' % sha256_bootloader)

    app_path = os.path.join(dut.app.binary_path, 'simple_ota.bin')
    output = dut.image_info(app_path)
    sha256_app = re.search(r'Validation Hash:\s+([a-f0-9]+)', output).group(1)
    Utility.console_log('app SHA256: %s' % sha256_app)
    return sha256_bootloader, sha256_app


@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA', nightly_run=True)
def test_examples_protocol_simple_ota_example(env, extra_data):
    """ steps: | 1. join AP 2. Fetch OTA image over HTTPS 3.
Reboot with the new OTA image """ dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT) # check and log bin size binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin') bin_size = os.path.getsize(binary_file) ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024)) sha256_bootloader, sha256_app = calc_all_sha256(dut1) # start test host_ip = get_my_ip() if (get_server_status(host_ip, 8000) is False): thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000)) thread1.daemon = True thread1.start() dut1.start_app() dut1.expect('Loaded app from partition at offset 0x10000', timeout=30) check_sha256(sha256_bootloader, dut1.expect(re.compile(r'SHA-256 for bootloader:\s+([a-f0-9]+)'))[0]) check_sha256(sha256_app, dut1.expect(re.compile(r'SHA-256 for current firmware:\s+([a-f0-9]+)'))[0]) try: ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30) print('Connected to AP with IP: {}'.format(ip_address)) except DUT.ExpectTimeout: raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP') thread1.terminate() dut1.expect('Starting OTA example', timeout=30) print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin')) dut1.write('https://' + host_ip + ':8000/simple_ota.bin') dut1.expect('Loaded app from partition at offset 0x110000', timeout=60) dut1.expect('Starting OTA example', timeout=30) thread1.terminate() @ttfw_idf.idf_example_test(env_tag='Example_EthKitV1') def test_examples_protocol_simple_ota_example_ethernet_with_spiram_config(env, extra_data): """ steps: | 1. join AP 2. Fetch OTA image over HTTPS 3. Reboot with the new OTA image """ dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT, app_config_name='spiram') # check and log bin size binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin') bin_size = os.path.getsize(binary_file) ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024)) # start test host_ip = get_my_ip() if (get_server_status(host_ip, 8000) is False): thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000)) thread1.daemon = True thread1.start() dut1.start_app() dut1.expect('Loaded app from partition at offset 0x10000', timeout=30) try: ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30) print('Connected to AP with IP: {}'.format(ip_address)) except DUT.ExpectTimeout: raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP') thread1.terminate() dut1.expect('Starting OTA example', timeout=30) print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin')) dut1.write('https://' + host_ip + ':8000/simple_ota.bin') dut1.expect('Loaded app from partition at offset 0x110000', timeout=60) dut1.expect('Starting OTA example', timeout=30) thread1.terminate() @ttfw_idf.idf_example_test(env_tag='Example_Flash_Encryption_OTA') def test_examples_protocol_simple_ota_example_with_flash_encryption(env, extra_data): """ steps: | 1. join AP 2. Fetch OTA image over HTTPS 3. 
Reboot with the new OTA image """ dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT, app_config_name='flash_enc') # check and log bin size binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin') bin_size = os.path.getsize(binary_file) ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024)) # erase flash on the device print('Erasing the flash in order to have an empty NVS key partiton') dut1.erase_flash() # start test host_ip = get_my_ip() if (get_server_status(host_ip, 8000) is False): thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000)) thread1.daemon = True thread1.start() dut1.start_app() dut1.expect('Loaded app from partition at offset 0x20000', timeout=30) dut1.expect('Flash encryption mode is DEVELOPMENT (not secure)', timeout=10) try: ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30) print('Connected to AP with IP: {}'.format(ip_address)) except DUT.ExpectTimeout: raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP') thread1.terminate() dut1.expect('Starting OTA example', timeout=30) print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin')) dut1.write('https://' + host_ip + ':8000/simple_ota.bin') dut1.expect('Loaded app from partition at offset 0x120000', timeout=60) dut1.expect('Flash encryption mode is DEVELOPMENT (not secure)', timeout=10) dut1.expect('Starting OTA example', timeout=30) thread1.terminate() @ttfw_idf.idf_example_test(env_tag='Example_Flash_Encryption_OTA_WiFi', target=['esp32c3'], nightly_run=True) def test_examples_protocol_simple_ota_example_with_flash_encryption_wifi(env, extra_data): """ steps: | 1. join AP 2. Fetch OTA image over HTTPS 3. Reboot with the new OTA image """ dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', app_config_name='flash_enc_wifi') # check and log bin size binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin') bin_size = os.path.getsize(binary_file) ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024)) # erase flash on the device print('Erasing the flash in order to have an empty NVS key partiton') dut1.erase_flash() # start test host_ip = get_my_ip() if (get_server_status(host_ip, 8000) is False): thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000)) thread1.daemon = True thread1.start() dut1.start_app() dut1.expect('Loaded app from partition at offset 0x20000', timeout=30) dut1.expect('Flash encryption mode is DEVELOPMENT (not secure)', timeout=10) try: ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30) print('Connected to AP with IP: {}'.format(ip_address)) except DUT.ExpectTimeout: raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP') thread1.terminate() dut1.expect('Starting OTA example', timeout=30) print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin')) dut1.write('https://' + host_ip + ':8000/simple_ota.bin') dut1.expect('Loaded app from partition at offset 0x120000', timeout=60) dut1.expect('Flash encryption mode is DEVELOPMENT (not secure)', timeout=10) dut1.expect('Starting OTA example', timeout=30) thread1.terminate() @ttfw_idf.idf_example_test(env_tag='Example_EthKitV1') def test_examples_protocol_simple_ota_example_with_verify_app_signature_on_update_no_secure_boot_ecdsa(env, extra_data): """ steps: | 1. join AP 2. Fetch OTA image over HTTPS 3. 
Reboot with the new OTA image """
    dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example',
                       dut_class=ttfw_idf.ESP32DUT, app_config_name='on_update_no_sb_ecdsa')
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024))
    sha256_bootloader, sha256_app = calc_all_sha256(dut1)
    # start test
    host_ip = get_my_ip()
    if (get_server_status(host_ip, 8000) is False):
        thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
        thread1.daemon = True
        thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset 0x20000', timeout=30)

    check_sha256(sha256_bootloader, dut1.expect(re.compile(r'SHA-256 for bootloader:\s+([a-f0-9]+)'))[0])
    check_sha256(sha256_app, dut1.expect(re.compile(r'SHA-256 for current firmware:\s+([a-f0-9]+)'))[0])

    try:
        ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        thread1.terminate()
    dut1.expect('Starting OTA example', timeout=30)

    print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin'))
    dut1.write('https://' + host_ip + ':8000/simple_ota.bin')

    dut1.expect('Writing to partition subtype 16 at offset 0x120000', timeout=20)
    dut1.expect('Verifying image signature...', timeout=60)
    dut1.expect('Loaded app from partition at offset 0x120000', timeout=20)
    dut1.expect('Starting OTA example', timeout=30)
    thread1.terminate()


@ttfw_idf.idf_example_test(env_tag='Example_EthKitV12')
def test_examples_protocol_simple_ota_example_with_verify_app_signature_on_update_no_secure_boot_rsa(env, extra_data):
    """ steps: | 1. join AP 2. Fetch OTA image over HTTPS 3.
Reboot with the new OTA image """ dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT, app_config_name='on_update_no_sb_rsa') # check and log bin size binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin') bin_size = os.path.getsize(binary_file) ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024)) sha256_bootloader, sha256_app = calc_all_sha256(dut1) # start test host_ip = get_my_ip() if (get_server_status(host_ip, 8000) is False): thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000)) thread1.daemon = True thread1.start() dut1.start_app() dut1.expect('Loaded app from partition at offset 0x20000', timeout=30) check_sha256(sha256_bootloader, dut1.expect(re.compile(r'SHA-256 for bootloader:\s+([a-f0-9]+)'))[0]) check_sha256(sha256_app, dut1.expect(re.compile(r'SHA-256 for current firmware:\s+([a-f0-9]+)'))[0]) try: ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30) print('Connected to AP with IP: {}'.format(ip_address)) except DUT.ExpectTimeout: raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP') thread1.terminate() dut1.expect('Starting OTA example', timeout=30) print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin')) dut1.write('https://' + host_ip + ':8000/simple_ota.bin') dut1.expect('Writing to partition subtype 16 at offset 0x120000', timeout=20) dut1.expect('Verifying image signature...', timeout=60) dut1.expect('#0 app key digest == #0 trusted key digest', timeout=10) dut1.expect('Verifying with RSA-PSS...', timeout=10) dut1.expect('Signature verified successfully!', timeout=10) dut1.expect('Loaded app from partition at offset 0x120000', timeout=20) dut1.expect('Starting OTA example', timeout=30) thread1.terminate() if __name__ == '__main__': if sys.argv[2:]: # if two or more arguments provided: # Usage: example_test.py <image_dir> <server_port> [cert_di>] this_dir = os.path.dirname(os.path.realpath(__file__)) bin_dir = os.path.join(this_dir, sys.argv[1]) port = int(sys.argv[2]) cert_dir = bin_dir if not sys.argv[3:] else os.path.join(this_dir, sys.argv[3]) # optional argument print('Starting HTTPS server at "https://:{}"'.format(port)) start_https_server(bin_dir, '', port, server_file=os.path.join(cert_dir, 'ca_cert.pem'), key_file=os.path.join(cert_dir, 'ca_key.pem')) else: test_examples_protocol_simple_ota_example() test_examples_protocol_simple_ota_example_ethernet_with_spiram_config() test_examples_protocol_simple_ota_example_with_flash_encryption() test_examples_protocol_simple_ota_example_with_flash_encryption_wifi() test_examples_protocol_simple_ota_example_with_verify_app_signature_on_update_no_secure_boot_ecdsa() test_examples_protocol_simple_ota_example_with_verify_app_signature_on_update_no_secure_boot_rsa()
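# Illustrative sketch only (not part of the tests above): how the standalone
# helpers in this file fit together outside of the ttfw test harness.  The
# image directory below is a hypothetical placeholder, and nothing here is
# invoked automatically.
def _demo_ota_server_helpers(ota_image_dir='/tmp/ota_images', port=8000):
    host_ip = get_my_ip()
    # get_server_status() is how the tests decide whether an HTTPS server is
    # already serving OTA images on this host/port before spawning one.
    if not get_server_status(host_ip, port):
        server = multiprocessing.Process(target=start_https_server,
                                         args=(ota_image_dir, host_ip, port))
        server.daemon = True
        server.start()
    # check_sha256() raises ValueError unless the digest reported by the device
    # appears in the digest computed locally from the binary.
    check_sha256('a' * 64, 'a' * 64)
    return host_ip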
__author__ = 'chris' import json import random import time import nacl.signing import bitcoin from hashlib import sha256 from binascii import unhexlify, hexlify from collections import OrderedDict from urllib2 import Request, urlopen, URLError import re import os import nacl.encoding from twisted.internet import reactor from protos.objects import Listings from protos.countries import CountryCode from dht.utils import digest from constants import DATA_FOLDER from market.profile import Profile from keyutils.keys import KeyChain from keyutils.bip32utils import derive_childkey from log import Logger class Contract(object): """ A class for creating and interacting with OpenBazaar Ricardian contracts. """ def __init__(self, database, contract=None, hash_value=None, testnet=False): """ This class can be instantiated with either an `OrderedDict` or a hash of a contract. If a hash is used, we will load the contract from either the file system or cache. Alternatively, pass in no parameters if the intent is to create a new contract. Args: contract: an `OrderedDict` containing a filled out json contract hash: a hash160 (in raw bytes) of a contract testnet: is this contract on the testnet """ self.db = database self.keychain = KeyChain(self.db) if contract is not None: self.contract = contract elif hash_value is not None: try: file_path = self.db.HashMap().get_file(hash_value) if file_path is None: file_path = DATA_FOLDER + "cache/" + hexlify(hash_value) with open(file_path, 'r') as filename: self.contract = json.load(filename, object_pairs_hook=OrderedDict) except Exception: try: file_path = DATA_FOLDER + "purchases/in progress/" + hexlify(hash_value) + ".json" with open(file_path, 'r') as filename: self.contract = json.load(filename, object_pairs_hook=OrderedDict) except Exception: self.contract = {} else: self.contract = {} self.log = Logger(system=self) # used when purchasing this contract self.testnet = testnet self.ws = None self.blockchain = None self.amount_funded = 0 self.received_txs = [] self.timeout = None self.is_purchase = False def create(self, expiration_date, metadata_category, title, description, currency_code, price, process_time, nsfw, shipping_origin=None, shipping_regions=None, est_delivery_domestic=None, est_delivery_international=None, terms_conditions=None, returns=None, keywords=None, category=None, condition=None, sku=None, images=None, free_shipping=None, shipping_currency_code=None, shipping_domestic=None, shipping_international=None, options=None, moderators=None): """ All parameters are strings except: :param expiration_date: `string` (must be formatted UTC datetime) :param keywords: `list` :param nsfw: `boolean` :param images: a `list` of image files :param free_shipping: `boolean` :param shipping_origin: a 'string' formatted `CountryCode` :param shipping_regions: a 'list' of 'string' formatted `CountryCode`s :param options: a 'dict' containing options as keys and 'list' as option values. :param moderators: a 'list' of 'string' guids (hex encoded). 
""" profile = Profile(self.db).get() self.contract = OrderedDict( { "vendor_offer": { "listing": { "metadata": { "version": "0.1", "category": metadata_category.lower(), "category_sub": "fixed price" }, "id": { "guid": self.keychain.guid.encode("hex"), "pubkeys": { "guid": self.keychain.guid_signed_pubkey[64:].encode("hex"), "bitcoin": bitcoin.bip32_extract_key(self.keychain.bitcoin_master_pubkey), "encryption": self.keychain.encryption_pubkey.encode("hex") } }, "item": { "title": title, "description": description, "process_time": process_time, "price_per_unit": {}, "nsfw": nsfw } } } } ) if expiration_date.lower() == "never": self.contract["vendor_offer"]["listing"]["metadata"]["expiry"] = "never" else: self.contract["vendor_offer"]["listing"]["metadata"]["expiry"] = expiration_date + " UTC" if metadata_category == "physical good" and condition is not None: self.contract["vendor_offer"]["listing"]["item"]["condition"] = condition if currency_code.upper() == "BTC": item = self.contract["vendor_offer"]["listing"]["item"] item["price_per_unit"]["bitcoin"] = price else: item = self.contract["vendor_offer"]["listing"]["item"] item["price_per_unit"]["fiat"] = {} item["price_per_unit"]["fiat"]["price"] = price item["price_per_unit"]["fiat"]["currency_code"] = currency_code if keywords is not None: self.contract["vendor_offer"]["listing"]["item"]["keywords"] = [] self.contract["vendor_offer"]["listing"]["item"]["keywords"].extend(keywords) if category is not None: self.contract["vendor_offer"]["listing"]["item"]["category"] = category if sku is not None: self.contract["vendor_offer"]["listing"]["item"]["sku"] = sku if options is not None: self.contract["vendor_offer"]["listing"]["item"]["options"] = options if metadata_category == "physical good": self.contract["vendor_offer"]["listing"]["shipping"] = {} shipping = self.contract["vendor_offer"]["listing"]["shipping"] shipping["shipping_origin"] = shipping_origin if free_shipping is False: self.contract["vendor_offer"]["listing"]["shipping"]["free"] = False self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"] = {} if shipping_currency_code == "BTC": self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["bitcoin"] = {} self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["bitcoin"][ "domestic"] = shipping_domestic self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["bitcoin"][ "international"] = shipping_international else: shipping = self.contract["vendor_offer"]["listing"]["shipping"] shipping["flat_fee"]["fiat"] = {} shipping["flat_fee"]["fiat"]["price"] = {} shipping["flat_fee"]["fiat"]["price"][ "domestic"] = shipping_domestic shipping["flat_fee"]["fiat"]["price"][ "international"] = shipping_international shipping["flat_fee"]["fiat"][ "currency_code"] = shipping_currency_code else: self.contract["vendor_offer"]["listing"]["shipping"]["free"] = True self.contract["vendor_offer"]["listing"]["shipping"]["shipping_regions"] = [] for region in shipping_regions: shipping = self.contract["vendor_offer"]["listing"]["shipping"] shipping["shipping_regions"].append(region) listing = self.contract["vendor_offer"]["listing"] listing["shipping"]["est_delivery"] = {} listing["shipping"]["est_delivery"]["domestic"] = est_delivery_domestic listing["shipping"]["est_delivery"][ "international"] = est_delivery_international if profile.HasField("handle"): self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"] = profile.handle if images is not None: 
self.contract["vendor_offer"]["listing"]["item"]["image_hashes"] = [] for image_hash in images: self.contract["vendor_offer"]["listing"]["item"]["image_hashes"].append(image_hash) if terms_conditions is not None or returns is not None: self.contract["vendor_offer"]["listing"]["policy"] = {} if terms_conditions is not None: self.contract["vendor_offer"]["listing"]["policy"]["terms_conditions"] = terms_conditions if returns is not None: self.contract["vendor_offer"]["listing"]["policy"]["returns"] = returns if moderators is not None: self.contract["vendor_offer"]["listing"]["moderators"] = [] for mod in moderators: mod_info = self.db.ModeratorStore().get_moderator(unhexlify(mod)) print mod_info if mod_info is not None: moderator = { "guid": mod, "blockchain_id": mod_info[6], "pubkeys": { "signing": { "key": mod_info[1][64:].encode("hex"), "signature": mod_info[1][:64].encode("hex") }, "encryption": { "key": mod_info[2].encode("hex"), "signature": mod_info[3].encode("hex") }, "bitcoin": { "key": mod_info[4].encode("hex"), "signature": mod_info[5].encode("hex") } } } self.contract["vendor_offer"]["listing"]["moderators"].append(moderator) listing = json.dumps(self.contract["vendor_offer"]["listing"], indent=4) self.contract["vendor_offer"]["signature"] = \ self.keychain.signing_key.sign(listing, encoder=nacl.encoding.HexEncoder)[:128] self.save() def add_purchase_info(self, quantity, ship_to=None, shipping_address=None, city=None, state=None, postal_code=None, country=None, moderator=None, options=None): """ Update the contract with the buyer's purchase information. """ profile = Profile(self.db).get() order_json = { "buyer_order": { "order": { "ref_hash": digest(json.dumps(self.contract, indent=4)).encode("hex"), "quantity": quantity, "id": { "guid": self.keychain.guid.encode("hex"), "pubkeys": { "guid": self.keychain.guid_signed_pubkey[64:].encode("hex"), "bitcoin": bitcoin.bip32_extract_key(self.keychain.bitcoin_master_pubkey), "encryption": self.keychain.encryption_pubkey.encode("hex") } }, "payment": {} } } } if profile.HasField("handle"): order_json["buyer_order"]["order"]["id"]["blockchain_id"] = profile.handle if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good": order_json["buyer_order"]["order"]["shipping"] = {} order_json["buyer_order"]["order"]["shipping"]["ship_to"] = ship_to order_json["buyer_order"]["order"]["shipping"]["address"] = shipping_address order_json["buyer_order"]["order"]["shipping"]["city"] = city order_json["buyer_order"]["order"]["shipping"]["state"] = state order_json["buyer_order"]["order"]["shipping"]["postal_code"] = postal_code order_json["buyer_order"]["order"]["shipping"]["country"] = country if options is not None: order_json["buyer_order"]["order"]["options"] = options if moderator: # TODO: Handle direct payments chaincode = sha256(str(random.getrandbits(256))).digest().encode("hex") order_json["buyer_order"]["order"]["payment"]["chaincode"] = chaincode valid_mod = False for mod in self.contract["vendor_offer"]["listing"]["moderators"]: if mod["guid"] == moderator: order_json["buyer_order"]["order"]["moderator"] = moderator masterkey_m = mod["pubkeys"]["bitcoin"]["key"] valid_mod = True if not valid_mod: return False masterkey_b = bitcoin.bip32_extract_key(self.keychain.bitcoin_master_pubkey) masterkey_v = self.contract["vendor_offer"]["listing"]["id"]["pubkeys"]["bitcoin"] buyer_key = derive_childkey(masterkey_b, chaincode) vendor_key = derive_childkey(masterkey_v, chaincode) moderator_key = derive_childkey(masterkey_m, 
chaincode) redeem_script = '75' + bitcoin.mk_multisig_script([buyer_key, vendor_key, moderator_key], 2) order_json["buyer_order"]["order"]["payment"]["redeem_script"] = redeem_script if self.testnet: payment_address = bitcoin.p2sh_scriptaddr(redeem_script, 196) else: payment_address = bitcoin.p2sh_scriptaddr(redeem_script) order_json["buyer_order"]["order"]["payment"]["address"] = payment_address price_json = self.contract["vendor_offer"]["listing"]["item"]["price_per_unit"] if "bitcoin" in price_json: order_json["buyer_order"]["order"]["payment"]["amount"] = price_json["bitcoin"] else: currency_code = price_json["fiat"]["currency_code"] fiat_price = price_json["fiat"]["price"] try: request = Request('https://api.bitcoinaverage.com/ticker/' + currency_code.upper() + '/last') response = urlopen(request) conversion_rate = response.read() except URLError: return False order_json["buyer_order"]["order"]["payment"]["amount"] = float( "{0:.8f}".format(float(fiat_price) / float(conversion_rate))) self.contract["buyer_order"] = order_json["buyer_order"] order = json.dumps(self.contract["buyer_order"]["order"], indent=4) self.contract["buyer_order"]["signature"] = \ self.keychain.signing_key.sign(order, encoder=nacl.encoding.HexEncoder)[:128] return self.contract["buyer_order"]["order"]["payment"]["address"] def add_order_confirmation(self, payout_address, comments=None, shipper=None, tracking_number=None, est_delivery=None, url=None, password=None): """ Add the vendor's order confirmation to the contract. """ if not self.testnet and not (payout_address[:1] == "1" or payout_address[:1] == "3"): raise Exception("Bitcoin address is not a mainnet address") elif self.testnet and not \ (payout_address[:1] == "n" or payout_address[:1] == "m" or payout_address[:1] == "2"): raise Exception("Bitcoin address is not a testnet address") try: bitcoin.b58check_to_hex(payout_address) except AssertionError: raise Exception("Invalid Bitcoin address") conf_json = { "vendor_order_confirmation": { "invoice": { "ref_hash": digest(json.dumps(self.contract, indent=4)).encode("hex"), "payout_address": payout_address } } } if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good": shipping = {"shipper": shipper, "tracking_number": tracking_number, "est_delivery": est_delivery} conf_json["vendor_order_confirmation"]["invoice"]["shipping"] = shipping elif self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "digital good": content_source = {"url": url, "password": password} conf_json["vendor_order_confirmation"]["invoice"]["content_source"] = content_source if comments: conf_json["vendor_order_confirmation"]["invoice"]["comments"] = comments confirmation = json.dumps(conf_json["vendor_order_confirmation"]["invoice"], indent=4) conf_json["vendor_order_confirmation"]["signature"] = \ self.keychain.signing_key.sign(confirmation, encoder=nacl.encoding.HexEncoder)[:128] order_id = digest(json.dumps(self.contract, indent=4)).encode("hex") self.contract["vendor_order_confirmation"] = conf_json["vendor_order_confirmation"] self.db.Sales().update_status(order_id, 2) file_path = DATA_FOLDER + "store/listings/in progress/" + order_id + ".json" with open(file_path, 'w') as outfile: outfile.write(json.dumps(self.contract, indent=4)) def accept_order_confirmation(self, ws, confirmation_json=None): """ Validate the order confirmation sent over from the seller and update our node accordingly. 
""" self.ws = ws try: if confirmation_json: self.contract["vendor_order_confirmation"] = json.loads(confirmation_json, object_pairs_hook=OrderedDict) contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict) del contract_dict["vendor_order_confirmation"] contract_hash = digest(json.dumps(contract_dict, indent=4)).encode("hex") ref_hash = self.contract["vendor_order_confirmation"]["invoice"]["ref_hash"] if ref_hash != contract_hash: raise Exception("Order number doesn't match") if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good": shipping = self.contract["vendor_order_confirmation"]["invoice"]["shipping"] if "tracking_number" not in shipping or "shipper" not in shipping: raise Exception("No shipping information") # update the order status in the db self.db.Purchases().update_status(contract_hash, 2) file_path = DATA_FOLDER + "purchases/in progress/" + contract_hash + ".json" # update the contract in the file system with open(file_path, 'w') as outfile: outfile.write(json.dumps(self.contract, indent=4)) message_json = { "order_confirmation": { "order_id": contract_hash, "title": self.contract["vendor_offer"]["listing"]["item"]["title"] } } # push the message over websockets self.ws.push(json.dumps(message_json, indent=4)) return contract_hash except Exception: return False def await_funding(self, websocket_server, libbitcoin_client, proofSig, is_purchase=True): """ Saves the contract to the file system and db as an unfunded contract. Listens on the libbitcoin server for the multisig address to be funded. Deletes the unfunded contract from the file system and db if it goes unfunded for more than 10 minutes. """ # TODO: Handle direct payments self.ws = websocket_server self.blockchain = libbitcoin_client self.is_purchase = is_purchase order_id = digest(json.dumps(self.contract, indent=4)).encode("hex") payment_address = self.contract["buyer_order"]["order"]["payment"]["address"] vendor_item = self.contract["vendor_offer"]["listing"]["item"] if "image_hashes" in vendor_item: thumbnail_hash = vendor_item["image_hashes"][0] else: thumbnail_hash = "" if "blockchain_id" in self.contract["vendor_offer"]["listing"]["id"]: vendor = self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"] else: vendor = self.contract["vendor_offer"]["listing"]["id"]["guid"] if is_purchase: file_path = DATA_FOLDER + "purchases/in progress/" + order_id + ".json" self.db.Purchases().new_purchase(order_id, self.contract["vendor_offer"]["listing"]["item"]["title"], time.time(), self.contract["buyer_order"]["order"]["payment"]["amount"], payment_address, 0, thumbnail_hash, vendor, proofSig) else: file_path = DATA_FOLDER + "store/listings/in progress/" + order_id + ".json" self.db.Sales().new_sale(order_id, self.contract["vendor_offer"]["listing"]["item"]["title"], time.time(), self.contract["buyer_order"]["order"]["payment"]["amount"], payment_address, 0, thumbnail_hash, vendor) with open(file_path, 'w') as outfile: outfile.write(json.dumps(self.contract, indent=4)) self.timeout = reactor.callLater(600, self._delete_unfunded) self.blockchain.subscribe_address(payment_address, notification_cb=self.on_tx_received) def _delete_unfunded(self): """ The user failed to fund the contract in the 10 minute window. Remove it from the file system and db. 
""" order_id = digest(json.dumps(self.contract, indent=4)).encode("hex") if self.is_purchase: file_path = DATA_FOLDER + "purchases/in progress/" + order_id + ".json" self.db.Purchases().delete_purchase(order_id) else: file_path = DATA_FOLDER + "store/listings/in progress/" + order_id + ".json" self.db.Sales().delete_sale(order_id) if os.path.exists(file_path): os.remove(file_path) def on_tx_received(self, address_version, address_hash, height, block_hash, tx): """ Fire when the libbitcoin server tells us we received a payment to this funding address. While unlikely, a user may send multiple transactions to the funding address reach the funding level. We need to keep a running balance and increment it when a new transaction is received. If the contract is fully funded, we push a notification to the websockets. """ # decode the transaction transaction = bitcoin.deserialize(tx.encode("hex")) # get the amount (in satoshi) the user is expected to pay amount_to_pay = int(float(self.contract["buyer_order"]["order"]["payment"]["amount"]) * 100000000) if tx not in self.received_txs: # make sure we aren't parsing the same tx twice. output_script = 'a914' + digest(unhexlify( self.contract["buyer_order"]["order"]["payment"]["redeem_script"])).encode("hex") + '87' for output in transaction["outs"]: if output["script"] == output_script: self.amount_funded += output["value"] if tx not in self.received_txs: self.received_txs.append(tx) if self.amount_funded >= amount_to_pay: # if fully funded self.timeout.cancel() self.blockchain.unsubscribe_address( self.contract["buyer_order"]["order"]["payment"]["address"], self.on_tx_received) order_id = digest(json.dumps(self.contract, indent=4)).encode("hex") if self.is_purchase: message_json = { "payment_received": { "address": self.contract["buyer_order"]["order"]["payment"]["address"], "order_id": order_id } } # update the db self.db.Purchases().update_status(order_id, 1) self.log.info("Payment for order id %s successfully broadcast to network." % order_id) else: message_json = { "new_order": { "order_id": order_id, "title": self.contract["vendor_offer"]["listing"]["item"]["title"] } } self.db.Sales().update_status(order_id, 1) self.log.info("Received new order %s" % order_id) # push the message over websockets self.ws.push(json.dumps(message_json, indent=4)) def get_contract_id(self): contract = json.dumps(self.contract, indent=4) return digest(contract) def delete(self, delete_images=True): """ Deletes the contract json from the OpenBazaar directory as well as the listing metadata from the db and all the related images in the file system. 
""" # build the file_name from the contract file_name = str(self.contract["vendor_offer"]["listing"]["item"]["title"][:100]) file_name = re.sub(r"[^\w\s]", '', file_name) file_name = re.sub(r"\s+", '_', file_name) file_path = DATA_FOLDER + "store/listings/contracts/" + file_name + ".json" h = self.db.HashMap() # maybe delete the images from disk if "image_hashes" in self.contract["vendor_offer"]["listing"]["item"] and delete_images: for image_hash in self.contract["vendor_offer"]["listing"]["item"]["image_hashes"]: # delete from disk image_path = h.get_file(unhexlify(image_hash)) if os.path.exists(image_path): os.remove(image_path) # remove pointer to the image from the HashMap h.delete(unhexlify(image_hash)) # delete the contract from disk if os.path.exists(file_path): os.remove(file_path) # delete the listing metadata from the db contract_hash = digest(json.dumps(self.contract, indent=4)) self.db.ListingsStore().delete_listing(contract_hash) # remove the pointer to the contract from the HashMap h.delete(contract_hash) def save(self): """ Saves the json contract into the OpenBazaar/store/listings/contracts/ directory. It uses the title as the file name so it's easy on human eyes. A mapping of the hash of the contract and file path is stored in the database so we can retrieve the contract with only its hash. Additionally, the contract metadata (sent in response to the GET_LISTINGS query) is saved in the db for fast access. """ # get the contract title to use as the file name and format it file_name = str(self.contract["vendor_offer"]["listing"]["item"]["title"][:100]) file_name = re.sub(r"[^\w\s]", '', file_name) file_name = re.sub(r"\s+", '_', file_name) # save the json contract to the file system file_path = DATA_FOLDER + "store/listings/contracts/" + file_name + ".json" with open(file_path, 'w') as outfile: outfile.write(json.dumps(self.contract, indent=4)) # Create a `ListingMetadata` protobuf object using data from the full contract listings = Listings() data = listings.ListingMetadata() data.contract_hash = digest(json.dumps(self.contract, indent=4)) vendor_item = self.contract["vendor_offer"]["listing"]["item"] data.title = vendor_item["title"] if "image_hashes" in vendor_item: data.thumbnail_hash = unhexlify(vendor_item["image_hashes"][0]) if "category" in vendor_item: data.category = vendor_item["category"] if "bitcoin" not in vendor_item["price_per_unit"]: data.price = float(vendor_item["price_per_unit"]["fiat"]["price"]) data.currency_code = vendor_item["price_per_unit"]["fiat"][ "currency_code"] else: data.price = float(vendor_item["price_per_unit"]["bitcoin"]) data.currency_code = "BTC" data.nsfw = vendor_item["nsfw"] if "shipping" not in self.contract["vendor_offer"]["listing"]: data.origin = CountryCode.Value("NA") else: data.origin = CountryCode.Value( self.contract["vendor_offer"]["listing"]["shipping"]["shipping_origin"].upper()) for region in self.contract["vendor_offer"]["listing"]["shipping"]["shipping_regions"]: data.ships_to.append(CountryCode.Value(region.upper())) # save the mapping of the contract file path and contract hash in the database self.db.HashMap().insert(data.contract_hash, file_path) # save the `ListingMetadata` protobuf to the database as well self.db.ListingsStore().add_listing(data) def verify(self, sender_key): """ Validate that an order sent over by a buyer is filled out correctly. 
""" try: contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict) del contract_dict["buyer_order"] contract_hash = digest(json.dumps(contract_dict, indent=4)) ref_hash = unhexlify(self.contract["buyer_order"]["order"]["ref_hash"]) # verify that the reference hash matches the contract and that the contract actually exists if contract_hash != ref_hash or not self.db.HashMap().get_file(ref_hash): raise Exception("Order for contract that doesn't exist") # verify the signature on the order verify_key = nacl.signing.VerifyKey(sender_key) verify_key.verify(json.dumps(self.contract["buyer_order"]["order"], indent=4), unhexlify(self.contract["buyer_order"]["signature"])) # verify buyer included the correct bitcoin amount for payment price_json = self.contract["vendor_offer"]["listing"]["item"]["price_per_unit"] if "bitcoin" in price_json: asking_price = price_json["bitcoin"] else: currency_code = price_json["fiat"]["currency_code"] fiat_price = price_json["fiat"]["price"] request = Request('https://api.bitcoinaverage.com/ticker/' + currency_code.upper() + '/last') response = urlopen(request) conversion_rate = response.read() asking_price = float("{0:.8f}".format(float(fiat_price) / float(conversion_rate))) if asking_price > self.contract["buyer_order"]["order"]["payment"]["amount"]: raise Exception("Insuffient Payment") # verify a valid moderator was selected # TODO: handle direct payments valid_mod = False for mod in self.contract["vendor_offer"]["listing"]["moderators"]: if mod["guid"] == self.contract["buyer_order"]["order"]["moderator"]: valid_mod = True if not valid_mod: raise Exception("Invalid moderator") # verify all the shipping fields exist if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good": shipping = self.contract["buyer_order"]["order"]["shipping"] keys = ["ship_to", "address", "postal_code", "city", "state", "country"] for value in map(shipping.get, keys): if value is None: raise Exception("Missing shipping field") # verify buyer ID pubkeys = self.contract["buyer_order"]["order"]["id"]["pubkeys"] keys = ["guid", "bitcoin", "encryption"] for value in map(pubkeys.get, keys): if value is None: raise Exception("Missing pubkey field") # verify redeem script chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"] for mod in self.contract["vendor_offer"]["listing"]["moderators"]: if mod["guid"] == self.contract["buyer_order"]["order"]["moderator"]: masterkey_m = mod["pubkeys"]["bitcoin"]["key"] masterkey_v = bitcoin.bip32_extract_key(self.keychain.bitcoin_master_pubkey) masterkey_b = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"] buyer_key = derive_childkey(masterkey_b, chaincode) vendor_key = derive_childkey(masterkey_v, chaincode) moderator_key = derive_childkey(masterkey_m, chaincode) redeem_script = '75' + bitcoin.mk_multisig_script([buyer_key, vendor_key, moderator_key], 2) if redeem_script != self.contract["buyer_order"]["order"]["payment"]["redeem_script"]: raise Exception("Invalid redeem script") # verify the payment address if self.testnet: payment_address = bitcoin.p2sh_scriptaddr(redeem_script, 196) else: payment_address = bitcoin.p2sh_scriptaddr(redeem_script) if payment_address != self.contract["buyer_order"]["order"]["payment"]["address"]: raise Exception("Incorrect payment address") return True except Exception: return False
# -*- coding: utf-8 -*- # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from importlib import reload from unittest import mock from unittest.mock import patch import pytest from google.api_core import operation from google.auth import credentials as auth_credentials from google.auth.exceptions import GoogleAuthError from google.cloud import aiplatform from google.cloud.aiplatform import base from google.cloud.aiplatform import initializer from google.cloud.aiplatform.metadata import metadata_store from google.cloud.aiplatform_v1 import MetadataServiceClient from google.cloud.aiplatform_v1 import MetadataStore as GapicMetadataStore from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import metadata_service # project _TEST_PROJECT = "test-project" _TEST_LOCATION = "us-central1" _TEST_ALT_LOCATION = "europe-west4" _TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" # metadata_store _TEST_ID = "test-id" _TEST_DEFAULT_ID = "default" _TEST_NAME = ( f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/{_TEST_ID}" ) _TEST_ALT_LOC_NAME = ( f"projects/{_TEST_PROJECT}/locations/{_TEST_ALT_LOCATION}/metadataStores/{_TEST_ID}" ) _TEST_DEFAULT_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/{_TEST_DEFAULT_ID}" _TEST_INVALID_NAME = f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/{_TEST_ID}" # CMEK encryption _TEST_ENCRYPTION_KEY_NAME = "key_1234" _TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec( kms_key_name=_TEST_ENCRYPTION_KEY_NAME ) @pytest.fixture def get_metadata_store_mock(): with patch.object( MetadataServiceClient, "get_metadata_store" ) as get_metadata_store_mock: get_metadata_store_mock.return_value = GapicMetadataStore( name=_TEST_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, ) yield get_metadata_store_mock @pytest.fixture def get_default_metadata_store_mock(): with patch.object( MetadataServiceClient, "get_metadata_store" ) as get_metadata_store_mock: get_metadata_store_mock.return_value = GapicMetadataStore( name=_TEST_DEFAULT_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, ) yield get_metadata_store_mock @pytest.fixture def get_metadata_store_without_name_mock(): with patch.object( MetadataServiceClient, "get_metadata_store" ) as get_metadata_store_mock: get_metadata_store_mock.return_value = GapicMetadataStore( encryption_spec=_TEST_ENCRYPTION_SPEC, ) yield get_metadata_store_mock @pytest.fixture def create_metadata_store_mock(): with patch.object( MetadataServiceClient, "create_metadata_store" ) as create_metadata_store_mock: create_metadata_store_lro_mock = mock.Mock(operation.Operation) create_metadata_store_lro_mock.result.return_value = GapicMetadataStore( name=_TEST_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, ) create_metadata_store_mock.return_value = create_metadata_store_lro_mock yield create_metadata_store_mock @pytest.fixture def create_default_metadata_store_mock(): with patch.object( MetadataServiceClient, 
"create_metadata_store" ) as create_metadata_store_mock: create_metadata_store_lro_mock = mock.Mock(operation.Operation) create_metadata_store_lro_mock.result.return_value = GapicMetadataStore( name=_TEST_DEFAULT_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, ) create_metadata_store_mock.return_value = create_metadata_store_lro_mock yield create_metadata_store_mock @pytest.fixture def delete_metadata_store_mock(): with mock.patch.object( MetadataServiceClient, "delete_metadata_store" ) as delete_metadata_store_mock: delete_metadata_store_lro_mock = mock.Mock(operation.Operation) delete_metadata_store_lro_mock.result.return_value = ( metadata_service.DeleteMetadataStoreRequest() ) delete_metadata_store_mock.return_value = delete_metadata_store_lro_mock yield delete_metadata_store_mock class TestMetadataStore: def setup_method(self): reload(initializer) reload(aiplatform) def teardown_method(self): initializer.global_pool.shutdown(wait=True) def test_init_metadata_store(self, get_metadata_store_mock): aiplatform.init(project=_TEST_PROJECT) metadata_store._MetadataStore(metadata_store_name=_TEST_NAME) get_metadata_store_mock.assert_called_once_with( name=_TEST_NAME, retry=base._DEFAULT_RETRY ) def test_init_metadata_store_with_id(self, get_metadata_store_mock): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) metadata_store._MetadataStore(metadata_store_name=_TEST_ID) get_metadata_store_mock.assert_called_once_with( name=_TEST_NAME, retry=base._DEFAULT_RETRY ) def test_init_metadata_store_with_default_id(self, get_metadata_store_mock): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) metadata_store._MetadataStore() get_metadata_store_mock.assert_called_once_with( name=_TEST_DEFAULT_NAME, retry=base._DEFAULT_RETRY ) @pytest.mark.usefixtures("get_metadata_store_without_name_mock") @patch.dict( os.environ, {"GOOGLE_CLOUD_PROJECT": "", "GOOGLE_APPLICATION_CREDENTIALS": ""} ) def test_init_metadata_store_with_id_without_project_or_location(self): with pytest.raises(GoogleAuthError): metadata_store._MetadataStore( metadata_store_name=_TEST_ID, credentials=auth_credentials.AnonymousCredentials(), ) def test_init_metadata_store_with_location_override(self, get_metadata_store_mock): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) metadata_store._MetadataStore( metadata_store_name=_TEST_ID, location=_TEST_ALT_LOCATION ) get_metadata_store_mock.assert_called_once_with( name=_TEST_ALT_LOC_NAME, retry=base._DEFAULT_RETRY ) @pytest.mark.usefixtures("get_metadata_store_mock") def test_init_metadata_store_with_invalid_name(self): with pytest.raises(ValueError): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) metadata_store._MetadataStore(metadata_store_name=_TEST_INVALID_NAME) @pytest.mark.usefixtures("get_default_metadata_store_mock") def test_init_aiplatform_with_encryption_key_name_and_create_default_metadata_store( self, create_default_metadata_store_mock ): aiplatform.init( project=_TEST_PROJECT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, ) my_metadata_store = metadata_store._MetadataStore._create( encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, ) expected_metadata_store = GapicMetadataStore( encryption_spec=_TEST_ENCRYPTION_SPEC, ) create_default_metadata_store_mock.assert_called_once_with( parent=_TEST_PARENT, metadata_store_id=_TEST_DEFAULT_ID, metadata_store=expected_metadata_store, ) expected_metadata_store.name = _TEST_DEFAULT_NAME assert my_metadata_store._gca_resource == expected_metadata_store 
@pytest.mark.usefixtures("get_metadata_store_mock") def test_create_non_default_metadata_store(self, create_metadata_store_mock): aiplatform.init(project=_TEST_PROJECT) my_metadata_store = metadata_store._MetadataStore._create( metadata_store_id=_TEST_ID, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, ) expected_metadata_store = GapicMetadataStore( encryption_spec=_TEST_ENCRYPTION_SPEC, ) create_metadata_store_mock.assert_called_once_with( parent=_TEST_PARENT, metadata_store_id=_TEST_ID, metadata_store=expected_metadata_store, ) expected_metadata_store.name = _TEST_NAME assert my_metadata_store._gca_resource == expected_metadata_store
"""Forms for activity.""" from django import forms from django.forms.util import ErrorList from apps.widgets.smartgrid.models import ConfirmationCode, TextReminder, Level, \ ColumnName from apps.managers.player_mgr import player_mgr from apps.widgets.smartgrid_library.models import LibraryQuestionChoice class GenerateCodeForm(forms.Form): """Form for generating confirmation codes.""" event_id = forms.IntegerField(widget=forms.HiddenInput(), required=False) num_codes = forms.IntegerField(initial=0) class ChangeLevelForm(forms.Form): """change level form.""" level_choice = forms.ModelChoiceField(queryset=Level.objects.all(), required=True) column_choice = forms.ModelChoiceField(queryset=ColumnName.objects.all(), required=True) class ActivityTextForm(forms.Form): """Text form.""" question = forms.IntegerField(widget=forms.HiddenInput(), required=False) response = forms.CharField(widget=forms.Textarea(attrs={'rows': '2'}), required=True) comment = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False) social_email = forms.CharField(widget=forms.TextInput(attrs={'size': '30'}), required=False) def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) self.action = kwargs.pop('action', None) qid = None if 'question_id' in kwargs: qid = kwargs.pop('question_id') super(ActivityTextForm, self).__init__(*args, **kwargs) if qid: self.fields['choice_response'] = forms.ModelChoiceField( queryset=LibraryQuestionChoice.objects.filter(question__id=qid), required=True) def clean(self): """Custom validation to verify confirmation codes.""" cleaned_data = self.cleaned_data # Check if we are validating quetion if cleaned_data["question"] > 0: if not "response" in cleaned_data and not "choice_response" in cleaned_data: self._errors["response"] = ErrorList(["You need to answer the question."]) if "response" in cleaned_data: del cleaned_data["response"] if "choice_response" in cleaned_data: del cleaned_data["choice_response"] _validate_social_email(self.request, self.action, cleaned_data, self._errors) return cleaned_data class ActivityCodeForm(forms.Form): """confirmation code form.""" response = forms.CharField(widget=forms.TextInput(attrs={'size': '15'}), required=True) comment = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False) social_email = forms.CharField(widget=forms.TextInput(attrs={'size': '30'}), required=False) def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) self.action = kwargs.pop('action', None) super(ActivityCodeForm, self).__init__(*args, **kwargs) def clean(self): """Custom validation to verify confirmation codes.""" cleaned_data = self.cleaned_data # Check if we are validating a confirmation code. try: code = ConfirmationCode.objects.get(code=cleaned_data["response"].lower()) # Check if the code is inactive. if not code.is_active: self._errors["response"] = ErrorList(["This code has already been used."]) del cleaned_data["response"] # Check if this action is the same as the added action (if provided) elif self.action and code.action.event != self.action: self._errors["response"] = ErrorList( ["This confirmation code is not valid for this action."]) del cleaned_data["response"] # Check if the user has already submitted a code for this action. 
elif code.action in self.request.user.action_set.filter( actionmember__award_date__isnull=False): self._errors["response"] = ErrorList( ["You have already redeemed a code for this action."]) del cleaned_data["response"] except ConfirmationCode.DoesNotExist: self._errors["response"] = ErrorList(["This code is not valid."]) del cleaned_data["response"] except KeyError: self._errors["response"] = ErrorList(["Please input code."]) _validate_social_email(self.request, self.action, cleaned_data, self._errors) return cleaned_data class ActivityFreeResponseForm(forms.Form): """Free response form.""" response = forms.CharField(widget=forms.Textarea) comment = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False) social_email = forms.CharField(widget=forms.TextInput(attrs={'size': '30'}), required=False) def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) self.action = kwargs.pop('action', None) super(ActivityFreeResponseForm, self).__init__(*args, **kwargs) def clean(self): """clean""" cleaned_data = self.cleaned_data _validate_social_email(self.request, self.action, cleaned_data, self._errors) return cleaned_data class ActivityImageForm(forms.Form): """Image upload form.""" image_response = forms.ImageField() comment = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False) social_email = forms.CharField(widget=forms.TextInput(attrs={'size': '30'}), required=False) def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) self.action = kwargs.pop('action', None) super(ActivityImageForm, self).__init__(*args, **kwargs) def clean(self): """clean""" cleaned_data = self.cleaned_data _validate_social_email(self.request, self.action, cleaned_data, self._errors) return cleaned_data class ActivityFreeResponseImageForm(forms.Form): """Free response and image upload form.""" response = forms.CharField(widget=forms.Textarea) image_response = forms.ImageField() comment = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False) social_email = forms.CharField(widget=forms.TextInput(attrs={'size': '30'}), required=False) def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) self.action = kwargs.pop('action', None) super(ActivityFreeResponseImageForm, self).__init__(*args, **kwargs) def clean(self): """clean""" cleaned_data = self.cleaned_data _validate_social_email(self.request, self.action, cleaned_data, self._errors) return cleaned_data class CommitmentCommentForm(forms.Form): """commitment comment form.""" social_email = forms.EmailField(required=False) def __init__(self, *args, **kwargs): self.username = kwargs.pop('user', None) super(CommitmentCommentForm, self).__init__(*args, **kwargs) def clean_social_email(self): """Check if this social_email is valid.""" email = self.cleaned_data['social_email'].strip().lower() if email: user = player_mgr.get_user_by_email(email) if user == None: raise forms.ValidationError('Can not find a registered user with such email.') elif user.username == self.username: raise forms.ValidationError('Can not use your own email.') return email class SurveyForm(forms.Form): """survey form.""" def __init__(self, *args, **kwargs): questions = None if 'questions' in kwargs: questions = kwargs.pop('questions') super(SurveyForm, self).__init__(*args, **kwargs) if questions: for i, q in enumerate(questions): self.fields['choice_response_%s' % i] = forms.ModelChoiceField( queryset=LibraryQuestionChoice.objects.filter(question__id=q.pk), label=q.question, 
required=True ) def clean(self): """clean""" cleaned_data = self.cleaned_data return cleaned_data def _validate_social_email(request, action, cleaned_data, errors): """validate the two social email.""" _ = action _validate_one_email(request, cleaned_data, "social_email", errors) def _validate_one_email(request, cleaned_data, email, errors): """validate one email.""" if cleaned_data[email]: user = player_mgr.get_user_by_email(cleaned_data[email].lower()) if user == None or user == request.user: errors[email] = ErrorList(["Invalid email. Please input only one valid email."]) del cleaned_data[email] class EventCodeForm(forms.Form): """event code form in the upcoming event widget.""" response = forms.CharField(widget=forms.TextInput(attrs={'size': '12'})) social_email = forms.CharField(widget=forms.TextInput(attrs={'size': '15'}), initial="Email", required=False) #------ Reminder form --------- from localflavor.us.forms import USPhoneNumberField REMINDER_TIME_CHOICES = ( ("1", "1 hour"), ("2", "2 hours"), ("3", "3 hours"), ("4", "4 hours"), ("5", "5 hours"), ) class ReminderForm(forms.Form): """reminder form.""" send_email = forms.BooleanField(required=False) email = forms.EmailField(required=False, label="Email Address") send_text = forms.BooleanField(required=False) email_advance = forms.ChoiceField(choices=REMINDER_TIME_CHOICES, label="Send reminder how far in advance?") text_number = USPhoneNumberField(required=False, label="Mobile phone number") text_carrier = forms.ChoiceField(choices=TextReminder.TEXT_CARRIERS, required=False, label="Carrier") text_advance = forms.ChoiceField(choices=REMINDER_TIME_CHOICES, label="Send reminder how far in advance?") def clean(self): """validate form.""" cleaned_data = self.cleaned_data send_email = cleaned_data.get("send_email") email = None if "email" in cleaned_data: email = cleaned_data.get("email") if send_email and (not email or len(email) == 0): raise forms.ValidationError("A valid email address is required.") send_text = cleaned_data.get("send_text") number = None if "text_number" in cleaned_data: number = cleaned_data.get("text_number") if send_text and (not number or len(number) == 0): raise forms.ValidationError("A valid phone number is required.") return cleaned_data
"""Support for Russound multizone controllers using RIO Protocol.""" from __future__ import annotations from russound_rio import Russound import voluptuous as vol from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity from homeassistant.components.media_player.const import ( MEDIA_TYPE_MUSIC, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, ) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, STATE_OFF, STATE_ON, ) from homeassistant.core import HomeAssistant, callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType SUPPORT_RUSSOUND = ( SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_PORT, default=9621): cv.port, } ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the Russound RIO platform.""" host = config.get(CONF_HOST) port = config.get(CONF_PORT) russ = Russound(hass.loop, host, port) await russ.connect() # Discover sources and zones sources = await russ.enumerate_sources() valid_zones = await russ.enumerate_zones() devices = [] for zone_id, name in valid_zones: await russ.watch_zone(zone_id) dev = RussoundZoneDevice(russ, zone_id, name, sources) devices.append(dev) @callback def on_stop(event): """Shutdown cleanly when hass stops.""" hass.loop.create_task(russ.close()) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_stop) async_add_entities(devices) class RussoundZoneDevice(MediaPlayerEntity): """Representation of a Russound Zone.""" def __init__(self, russ, zone_id, name, sources): """Initialize the zone device.""" super().__init__() self._name = name self._russ = russ self._zone_id = zone_id self._sources = sources def _zone_var(self, name, default=None): return self._russ.get_cached_zone_variable(self._zone_id, name, default) def _source_var(self, name, default=None): current = int(self._zone_var("currentsource", 0)) if current: return self._russ.get_cached_source_variable(current, name, default) return default def _source_na_var(self, name): """Will replace invalid values with None.""" current = int(self._zone_var("currentsource", 0)) if current: value = self._russ.get_cached_source_variable(current, name, None) if value in (None, "", "------"): return None return value return None def _zone_callback_handler(self, zone_id, *args): if zone_id == self._zone_id: self.schedule_update_ha_state() def _source_callback_handler(self, source_id, *args): current = int(self._zone_var("currentsource", 0)) if source_id == current: self.schedule_update_ha_state() async def async_added_to_hass(self): """Register callback handlers.""" self._russ.add_zone_callback(self._zone_callback_handler) self._russ.add_source_callback(self._source_callback_handler) @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the zone.""" return self._zone_var("name", self._name) @property def state(self): """Return the state of the device.""" status = self._zone_var("status", "OFF") if status == "ON": return STATE_ON if status == "OFF": return STATE_OFF @property 
def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_RUSSOUND @property def source(self): """Get the currently selected source.""" return self._source_na_var("name") @property def source_list(self): """Return a list of available input sources.""" return [x[1] for x in self._sources] @property def media_content_type(self): """Content type of current playing media.""" return MEDIA_TYPE_MUSIC @property def media_title(self): """Title of current playing media.""" return self._source_na_var("songname") @property def media_artist(self): """Artist of current playing media, music track only.""" return self._source_na_var("artistname") @property def media_album_name(self): """Album name of current playing media, music track only.""" return self._source_na_var("albumname") @property def media_image_url(self): """Image url of current playing media.""" return self._source_na_var("coverarturl") @property def volume_level(self): """Volume level of the media player (0..1). Value is returned based on a range (0..50). Therefore float divide by 50 to get to the required range. """ return float(self._zone_var("volume", 0)) / 50.0 async def async_turn_off(self): """Turn off the zone.""" await self._russ.send_zone_event(self._zone_id, "ZoneOff") async def async_turn_on(self): """Turn on the zone.""" await self._russ.send_zone_event(self._zone_id, "ZoneOn") async def async_set_volume_level(self, volume): """Set the volume level.""" rvol = int(volume * 50.0) await self._russ.send_zone_event(self._zone_id, "KeyPress", "Volume", rvol) async def async_select_source(self, source): """Select the source input for this zone.""" for source_id, name in self._sources: if name.lower() != source.lower(): continue await self._russ.send_zone_event(self._zone_id, "SelectSource", source_id) break
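

# --- Hedged sketch (illustration only; not part of the integration above) ---
# Russound RIO reports zone volume on a 0..50 scale, while Home Assistant
# expects media_player volume as a float in 0..1.  The helpers below restate
# the conversion performed by volume_level and async_set_volume_level; the
# function names are made up for the example.


def russound_to_ha_volume(raw_volume):
    """Map a RIO volume (0..50) onto Home Assistant's 0..1 range."""
    return float(raw_volume) / 50.0


def ha_to_russound_volume(volume):
    """Map a Home Assistant volume (0..1) back to the RIO 0..50 scale."""
    return int(volume * 50.0)


# Round trip: RIO volume 25 corresponds to 0.5 in Home Assistant.
assert russound_to_ha_volume(25) == 0.5
assert ha_to_russound_volume(0.5) == 25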
import os import re import subprocess import sys from reviewboard.diffviewer.parser import DiffParser from reviewboard.scmtools.core import SCMTool, HEAD, PRE_CREATION from reviewboard.scmtools.errors import SCMError, FileNotFoundError # This specific import is necessary to handle the paths for # cygwin enabled machines. if (sys.platform.startswith('win') or sys.platform.startswith('cygwin')): import ntpath as cpath else: import posixpath as cpath class ClearCaseTool(SCMTool): name = 'ClearCase' uses_atomic_revisions = False supports_authentication = False dependencies = { 'executables': ['cleartool'], } # This regular expression can extract from extended_path # pure system path. It is construct from two main parts. # First match everything from beginning of line to first # occurence of /. Second match parts between /main and # numbers (file version). # This patch assume each branch present in extended_path # was derived from /main and there is no file or directory # called "main" in path. UNEXTENDED = re.compile(r'^(.+?)/|/?(.+?)/main/?.*?/([0-9]+|CHECKEDOUT)') def __init__(self, repository): self.repopath = repository.path SCMTool.__init__(self, repository) self.client = ClearCaseClient(self.repopath) def unextend_path(self, extended_path): """Remove ClearCase revision and branch informations from path. ClearCase paths contain additional informations about branch and file version preceded by @@. This function remove this parts from ClearCase path to make it more readable For example this function convert extended path:: /vobs/comm@@/main/122/network@@/main/55/sntp @@/main/4/src@@/main/1/sntp.c@@/main/8 to the the to regular path:: /vobs/comm/network/sntp/src/sntp.c """ if not '@@' in extended_path: return HEAD, extended_path # Result of regular expression search result is list of tuples. # We must flat this to one list. The best way is use list comprehension. # b is first because it frequently occure in tuples. # Before that remove @@ from path. unextended_chunks = [ b or a for a, b, foo in self.UNEXTENDED.findall(extended_path.replace('@@', '')) ] # Purpose of realpath is remove parts like /./ generated by # ClearCase when vobs branch was fresh created unextended_path = cpath.realpath( cpath.join(*unextended_chunks) ) revision = extended_path.rsplit('@@', 1)[1] if revision.endswith('CHECKEDOUT'): revision = HEAD return (revision, unextended_path) @classmethod def relpath(cls, path, start): """Wrapper for os.path.relpath for Python 2.4. Python 2.4 doesn't have the os.path.relpath function, so this approximates it well enough for our needs. """ if not hasattr(cpath, 'relpath'): if start[-1] != os.sep: start += os.sep return path[len(start):] return cpath.relpath(path, start) def normalize_path_for_display(self, filename): """Return display friendly path without revision informations. 
In path construct for only display purpuse we don't need information about branch, version or even repository path so we return unextended path relative to repopath (view) """ return self.relpath(self.unextend_path(filename)[1], self.repopath) def get_repository_info(self): vobstag = self._get_vobs_tag(self.repopath) return { 'repopath': self.repopath, 'uuid': self._get_vobs_uuid(vobstag) } def _get_vobs_tag(self, repopath): cmdline = ["cleartool", "describe", "-short", "vob:."] p = subprocess.Popen( cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.repopath) (res, error) = p.communicate() failure = p.poll() if failure: raise SCMError(error) return res.rstrip() def _get_vobs_uuid(self, vobstag): cmdline = ["cleartool", "lsvob", "-long", vobstag] p = subprocess.Popen( cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.repopath) (res, error) = p.communicate() failure = p.poll() if failure: raise SCMError(error) for line in res.splitlines(True): if line.startswith('Vob family uuid:'): return line.split(' ')[-1].rstrip() raise SCMError("Can't find familly uuid for vob: %s" % vobstag) def get_file(self, extended_path, revision=HEAD): """Return content of file or list content of directory""" if not extended_path: raise FileNotFoundError(extended_path, revision) if revision == PRE_CREATION: return '' if cpath.isdir(extended_path): output = self.client.list_dir(extended_path, revision) elif cpath.exists(extended_path): output = self.client.cat_file(extended_path, revision) else: raise FileNotFoundError(extended_path, revision) return output def parse_diff_revision(self, extended_path, revision_str, *args, **kwargs): """Guess revision based on extended_path. Revision is part of file path, called extended-path, revision_str contains only modification's timestamp. """ if extended_path.endswith(os.path.join(os.sep, 'main', '0')): revision = PRE_CREATION elif (extended_path.endswith('CHECKEDOUT') or not '@@' in extended_path): revision = HEAD else: revision = extended_path.rsplit('@@', 1)[1] return extended_path, revision def get_fields(self): return ['basedir', 'diff_path'] def get_parser(self, data): return ClearCaseDiffParser(data, self.repopath) class ClearCaseDiffParser(DiffParser): """ Special parsing for diffs created with the post-review for ClearCase. """ SPECIAL_REGEX = re.compile(r'^==== (\S+) (\S+) ====$') def __init__(self, data, repopath): self.repopath = repopath super(ClearCaseDiffParser, self).__init__(data) def parse_diff_header(self, linenum, info): """Obtain correct clearcase file paths. Paths for the same file may differ from paths in developer view because it depends from configspec and this is custom so we translate oids, attached by post-review, to filenames to get paths working well inside clearcase view on reviewboard side. """ # Because ==== oid oid ==== is present after each header # parse standard +++ and --- headers at the first place linenum = super(ClearCaseDiffParser, self).parse_diff_header(linenum, info) m = self.SPECIAL_REGEX.match(self.lines[linenum]) if m: info['origFile'] = self._oid2filename(m.group(1)) info['newFile'] = self._oid2filename(m.group(2)) linenum += 1 if (linenum < len(self.lines) and (self.lines[linenum].startswith("Binary files ") or self.lines[linenum].startswith("Files "))): # To consider filenames translated from oids # origInfo and newInfo keys must exists. # Other files already contain this values field # by timestamp from +++/--- diff header. 
                info['origInfo'] = ''
                info['newInfo'] = ''

                # Binary files need origInfo and newInfo added manually
                # because they don't have diff headers (only oids).
                info['binary'] = True
                linenum += 1

        return linenum

    def _oid2filename(self, oid):
        cmdline = ["cleartool", "describe", "-fmt", "%En@@%Vn",
                   "oid:%s" % oid]
        p = subprocess.Popen(
            cmdline,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.repopath)

        (res, error) = p.communicate()
        failure = p.poll()

        if failure:
            raise SCMError(error)

        drive = os.path.splitdrive(self.repopath)[0]
        if drive:
            res = os.path.join(drive, res)

        return ClearCaseTool.relpath(res, self.repopath)


class ClearCaseClient(object):
    def __init__(self, path):
        self.path = path

    def cat_file(self, filename, revision):
        f = open(filename, 'r')
        lines = f.readlines()
        f.close()

        return ''.join(lines)

    def list_dir(self, path, revision):
        return ''.join([
            '%s\n' % s
            for s in sorted(os.listdir(path))
        ])
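

# --- Hedged sketch (illustration only; not part of the tool above) ---
# ClearCase extended paths carry version information after '@@', e.g.
# element@@/main/8.  unextend_path() and parse_diff_revision() above peel the
# revision off with a single rsplit on the last '@@' and treat CHECKEDOUT as
# HEAD.  The snippet below restates just that split on the example path from
# the unextend_path docstring; the helper name is made up for the example.


def split_extended_path(extended_path):
    """Return (element_part, revision) for a ClearCase extended path."""
    if '@@' not in extended_path:
        return extended_path, None
    element, revision = extended_path.rsplit('@@', 1)
    return element, revision


_example = ('/vobs/comm@@/main/122/network@@/main/55/sntp'
            '@@/main/4/src@@/main/1/sntp.c@@/main/8')
assert split_extended_path(_example)[1] == '/main/8'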
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import six from heat.common.i18n import _LI from heat.engine import dependencies from heat.engine import resource from heat.engine import scheduler from heat.objects import resource as resource_objects LOG = logging.getLogger(__name__) class StackUpdate(object): """ A Task to perform the update of an existing stack to a new template. """ def __init__(self, existing_stack, new_stack, previous_stack, rollback=False, error_wait_time=None): """Initialise with the existing stack and the new stack.""" self.existing_stack = existing_stack self.new_stack = new_stack self.previous_stack = previous_stack self.rollback = rollback self.error_wait_time = error_wait_time self.existing_snippets = dict((n, r.frozen_definition()) for n, r in self.existing_stack.items()) def __repr__(self): if self.rollback: return '%s Rollback' % str(self.existing_stack) else: return '%s Update' % str(self.existing_stack) @scheduler.wrappertask def __call__(self): """Return a co-routine that updates the stack.""" cleanup_prev = scheduler.DependencyTaskGroup( self.previous_stack.dependencies, self._remove_backup_resource, reverse=True) self.updater = scheduler.DependencyTaskGroup( self.dependencies(), self._resource_update, error_wait_time=self.error_wait_time) if not self.rollback: yield cleanup_prev() try: yield self.updater() finally: self.previous_stack.reset_dependencies() def _resource_update(self, res): if res.name in self.new_stack and self.new_stack[res.name] is res: return self._process_new_resource_update(res) else: return self._process_existing_resource_update(res) @scheduler.wrappertask def _remove_backup_resource(self, prev_res): if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE), (prev_res.DELETE, prev_res.COMPLETE)): LOG.debug("Deleting backup resource %s" % prev_res.name) yield prev_res.destroy() @staticmethod def _exchange_stacks(existing_res, prev_res): resource_objects.Resource.exchange_stacks(existing_res.stack.context, existing_res.id, prev_res.id) prev_stack, existing_stack = prev_res.stack, existing_res.stack prev_stack.add_resource(existing_res) existing_stack.add_resource(prev_res) @scheduler.wrappertask def _create_resource(self, new_res): res_name = new_res.name # Clean up previous resource if res_name in self.previous_stack: prev_res = self.previous_stack[res_name] if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE), (prev_res.DELETE, prev_res.COMPLETE)): # Swap in the backup resource if it is in a valid state, # instead of creating a new resource if prev_res.status == prev_res.COMPLETE: LOG.debug("Swapping in backup Resource %s" % res_name) self._exchange_stacks(self.existing_stack[res_name], prev_res) return LOG.debug("Deleting backup Resource %s" % res_name) yield prev_res.destroy() # Back up existing resource if res_name in self.existing_stack: LOG.debug("Backing up existing Resource %s" % res_name) existing_res = self.existing_stack[res_name] self.previous_stack.add_resource(existing_res) 
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE) self.existing_stack.add_resource(new_res) # Save new resource definition to backup stack if it is not # present in backup stack template already # it allows to resolve all dependencies that existing resource # can have if it was copied to backup stack if (res_name not in self.previous_stack.t[self.previous_stack.t.RESOURCES]): LOG.debug("Backing up new Resource %s" % res_name) definition = new_res.t.reparse(self.previous_stack, new_res.stack.t) self.previous_stack.t.add_resource(definition) self.previous_stack.t.store(self.previous_stack.context) yield new_res.create() @scheduler.wrappertask def _process_new_resource_update(self, new_res): res_name = new_res.name res_type = new_res.type() if (res_name in self.existing_stack and res_type == self.existing_stack[res_name].type()): existing_res = self.existing_stack[res_name] try: yield self._update_in_place(existing_res, new_res) except resource.UpdateReplace: pass else: # Save updated resource definition to backup stack # cause it allows the backup stack resources to be synchronized LOG.debug("Backing up updated Resource %s" % res_name) definition = existing_res.t.reparse(self.previous_stack, existing_res.stack.t) self.previous_stack.t.add_resource(definition) self.previous_stack.t.store(self.previous_stack.context) LOG.info(_LI("Resource %(res_name)s for stack %(stack_name)s " "updated"), {'res_name': res_name, 'stack_name': self.existing_stack.name}) return yield self._create_resource(new_res) def _update_in_place(self, existing_res, new_res): existing_snippet = self.existing_snippets[existing_res.name] prev_res = self.previous_stack.get(new_res.name) # Note the new resource snippet is resolved in the context # of the existing stack (which is the stack being updated) # but with the template of the new stack (in case the update # is switching template implementations) new_snippet = new_res.t.reparse(self.existing_stack, self.new_stack.t) return existing_res.update(new_snippet, existing_snippet, prev_resource=prev_res) @scheduler.wrappertask def _process_existing_resource_update(self, existing_res): res_name = existing_res.name if res_name in self.previous_stack: yield self._remove_backup_resource(self.previous_stack[res_name]) if res_name in self.new_stack: new_res = self.new_stack[res_name] if new_res.state == (new_res.INIT, new_res.COMPLETE): # Already updated in-place return if existing_res.stack is not self.previous_stack: yield existing_res.destroy() if res_name not in self.new_stack: self.existing_stack.remove_resource(res_name) def dependencies(self): ''' Return a Dependencies object representing the dependencies between update operations to move from an existing stack definition to a new one. 
''' existing_deps = self.existing_stack.dependencies new_deps = self.new_stack.dependencies def edges(): # Create/update the new stack's resources in create order for e in new_deps.graph().edges(): yield e # Destroy/cleanup the old stack's resources in delete order for e in existing_deps.graph(reverse=True).edges(): yield e # Don't cleanup old resources until after they have been replaced for name, res in six.iteritems(self.existing_stack): if name in self.new_stack: yield (res, self.new_stack[name]) return dependencies.Dependencies(edges()) def preview(self): upd_keys = set(self.new_stack.resources.keys()) cur_keys = set(self.existing_stack.resources.keys()) common_keys = cur_keys.intersection(upd_keys) deleted_keys = cur_keys.difference(upd_keys) added_keys = upd_keys.difference(cur_keys) updated_keys = [] replaced_keys = [] for key in common_keys: current_res = self.existing_stack.resources[key] updated_res = self.new_stack.resources[key] current_props = current_res.frozen_definition().properties( current_res.properties_schema, current_res.context) updated_props = updated_res.frozen_definition().properties( updated_res.properties_schema, updated_res.context) try: if current_res._needs_update(updated_res.frozen_definition(), current_res.frozen_definition(), updated_props, current_props, None, check_init_complete=False): current_res.update_template_diff_properties(updated_props, current_props) updated_keys.append(key) except resource.UpdateReplace: replaced_keys.append(key) return { 'unchanged': list(set(common_keys).difference( set(updated_keys + replaced_keys))), 'updated': updated_keys, 'replaced': replaced_keys, 'added': added_keys, 'deleted': deleted_keys, }
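

# --- Hedged sketch (illustration only; not part of Heat) ---
# preview() above classifies resources by comparing the key sets of the
# existing and new stacks: names only in the new template are 'added', names
# only in the existing stack are 'deleted', and names present in both are
# then split into updated/replaced/unchanged.  The toy example below restates
# just the set arithmetic with made-up resource names.

_existing_keys = {'server', 'volume', 'old_sg'}
_new_keys = {'server', 'volume', 'new_port'}

_common = _existing_keys.intersection(_new_keys)   # in both stacks
_deleted = _existing_keys.difference(_new_keys)    # only in the old stack
_added = _new_keys.difference(_existing_keys)      # only in the new stack

assert _common == {'server', 'volume'}
assert _deleted == {'old_sg'}
assert _added == {'new_port'}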
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Sylvain Afchain, eNovance SAS # @author: Francois Eleouet, Orange # @author: Mathieu Rohon, Orange import mock from neutron.common import constants from neutron.common import topics from neutron import context from neutron.db import agents_db from neutron.extensions import portbindings from neutron.extensions import providernet as pnet from neutron import manager from neutron.openstack.common import timeutils from neutron.plugins.ml2 import config as config from neutron.plugins.ml2.drivers.l2pop import constants as l2_consts from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import rpc from neutron.tests.unit import test_db_plugin as test_plugin HOST = 'my_l2_host' L2_AGENT = { 'binary': 'neutron-openvswitch-agent', 'host': HOST, 'topic': constants.L2_AGENT_TOPIC, 'configurations': {'tunneling_ip': '20.0.0.1', 'tunnel_types': ['vxlan']}, 'agent_type': constants.AGENT_TYPE_OVS, 'tunnel_type': [], 'start_flag': True } L2_AGENT_2 = { 'binary': 'neutron-openvswitch-agent', 'host': HOST + '_2', 'topic': constants.L2_AGENT_TOPIC, 'configurations': {'tunneling_ip': '20.0.0.2', 'tunnel_types': ['vxlan']}, 'agent_type': constants.AGENT_TYPE_OVS, 'tunnel_type': [], 'start_flag': True } L2_AGENT_3 = { 'binary': 'neutron-openvswitch-agent', 'host': HOST + '_3', 'topic': constants.L2_AGENT_TOPIC, 'configurations': {'tunneling_ip': '20.0.0.3', 'tunnel_types': []}, 'agent_type': constants.AGENT_TYPE_OVS, 'tunnel_type': [], 'start_flag': True } L2_AGENT_4 = { 'binary': 'neutron-openvswitch-agent', 'host': HOST + '_4', 'topic': constants.L2_AGENT_TOPIC, 'configurations': {'tunneling_ip': '20.0.0.4', 'tunnel_types': ['vxlan']}, 'agent_type': constants.AGENT_TYPE_OVS, 'tunnel_type': [], 'start_flag': True } PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' DEVICE_OWNER_COMPUTE = 'compute:None' class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase): def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. 
config.cfg.CONF.set_override('mechanism_drivers', ['openvswitch', 'linuxbridge', 'l2population'], 'ml2') super(TestL2PopulationRpcTestCase, self).setUp(PLUGIN_NAME) self.adminContext = context.get_admin_context() self.type_manager = managers.TypeManager() self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager) self.orig_supported_agents = l2_consts.SUPPORTED_AGENT_TYPES l2_consts.SUPPORTED_AGENT_TYPES = [constants.AGENT_TYPE_OVS] net_arg = {pnet.NETWORK_TYPE: 'vxlan', pnet.SEGMENTATION_ID: '1'} self._network = self._make_network(self.fmt, 'net1', True, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,), **net_arg) notifier_patch = mock.patch(NOTIFIER) notifier_patch.start() self.fanout_topic = topics.get_topic_name(topics.AGENT, topics.L2POPULATION, topics.UPDATE) fanout = ('neutron.common.rpc.RpcProxy.fanout_cast') fanout_patch = mock.patch(fanout) self.mock_fanout = fanout_patch.start() cast = ('neutron.common.rpc.RpcProxy.cast') cast_patch = mock.patch(cast) self.mock_cast = cast_patch.start() uptime = ('neutron.plugins.ml2.drivers.l2pop.db.L2populationDbMixin.' 'get_agent_uptime') uptime_patch = mock.patch(uptime, return_value=190) uptime_patch.start() def tearDown(self): l2_consts.SUPPORTED_AGENT_TYPES = self.orig_supported_agents super(TestL2PopulationRpcTestCase, self).tearDown() def _register_ml2_agents(self): callback = agents_db.AgentExtRpcCallback() callback.report_state(self.adminContext, agent_state={'agent_state': L2_AGENT}, time=timeutils.strtime()) callback.report_state(self.adminContext, agent_state={'agent_state': L2_AGENT_2}, time=timeutils.strtime()) callback.report_state(self.adminContext, agent_state={'agent_state': L2_AGENT_3}, time=timeutils.strtime()) callback.report_state(self.adminContext, agent_state={'agent_state': L2_AGENT_4}, time=timeutils.strtime()) def test_fdb_add_called(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p1['mac_address'], p1_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'add_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected, topic=self.fanout_topic) def test_fdb_add_not_called_type_local(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_3'} with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.assertFalse(self.mock_fanout.called) def test_fdb_add_two_agents(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 
'admin_state_up',), **host_arg) as port1: host_arg = {portbindings.HOST_ID: HOST + '_2', 'admin_state_up': True} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **host_arg) as port2: p1 = port1['port'] p2 = port2['port'] device = 'tap' + p1['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected1 = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY, [p2['mac_address'], p2_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'add_fdb_entries'} topic = topics.get_topic_name(topics.AGENT, topics.L2POPULATION, topics.UPDATE, HOST) self.mock_cast.assert_called_with(mock.ANY, expected1, topic=topic) expected2 = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p1['mac_address'], p1_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'add_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected2, topic=self.fanout_topic) def test_fdb_add_called_two_networks(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_2'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.subnet(cidr='10.1.0.0/24') as subnet2: with self.port(subnet=subnet2, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port3: p1 = port1['port'] p3 = port3['port'] device = 'tap' + p3['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected1 = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY, [p1['mac_address'], p1_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'add_fdb_entries'} topic = topics.get_topic_name(topics.AGENT, topics.L2POPULATION, topics.UPDATE, HOST) self.mock_cast.assert_called_with(mock.ANY, expected1, topic=topic) p3_ips = [p['ip_address'] for p in p3['fixed_ips']] expected2 = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p3['mac_address'], p3_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'add_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected2, topic=self.fanout_topic) def test_update_port_down(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device2 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device2) p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, 
device=device1) self.mock_fanout.reset_mock() self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=device2) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {'args': {'fdb_entries': {p2['network_id']: {'ports': {'20.0.0.1': [[p2['mac_address'], p2_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'remove_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected, topic=self.fanout_topic) def test_update_port_down_last_port_up(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device2 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device2) self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=device2) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {'args': {'fdb_entries': {p2['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p2['mac_address'], p2_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'remove_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected, topic=self.fanout_topic) def test_delete_port(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port: p1 = port['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device1 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {'args': {'fdb_entries': {p2['network_id']: {'ports': {'20.0.0.1': [[p2['mac_address'], p2_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'remove_fdb_entries'} self.mock_fanout.assert_any_call( mock.ANY, expected, topic=self.fanout_topic) def test_delete_port_last_port_up(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port: p1 = port['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p1['mac_address'], p1_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'remove_fdb_entries'} self.mock_fanout.assert_any_call( mock.ANY, expected, topic=self.fanout_topic) def test_fixed_ips_changed(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: 
HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.10'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 2) add_expected = {'args': {'fdb_entries': {'chg_ip': {p1['network_id']: {'20.0.0.1': {'after': [[p1['mac_address'], '10.0.0.10']]}}}}}, 'namespace': None, 'method': 'update_fdb_entries'} self.mock_fanout.assert_any_call( mock.ANY, add_expected, topic=self.fanout_topic) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.16'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 2) upd_expected = {'args': {'fdb_entries': {'chg_ip': {p1['network_id']: {'20.0.0.1': {'before': [[p1['mac_address'], '10.0.0.10']], 'after': [[p1['mac_address'], '10.0.0.16']]}}}}}, 'namespace': None, 'method': 'update_fdb_entries'} self.mock_fanout.assert_any_call( mock.ANY, upd_expected, topic=self.fanout_topic) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 1) del_expected = {'args': {'fdb_entries': {'chg_ip': {p1['network_id']: {'20.0.0.1': {'before': [[p1['mac_address'], '10.0.0.2']]}}}}}, 'namespace': None, 'method': 'update_fdb_entries'} self.mock_fanout.assert_any_call( mock.ANY, del_expected, topic=self.fanout_topic) def test_no_fdb_updates_without_port_updates(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1['status'] = 'ACTIVE' self.mock_fanout.reset_mock() fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.' 
'L2populationAgentNotifyAPI._notification_fanout') fanout_patch = mock.patch(fanout) mock_fanout = fanout_patch.start() plugin = manager.NeutronManager.get_plugin() plugin.update_port(self.adminContext, p1['id'], port1) self.assertFalse(mock_fanout.called) fanout_patch.stop() def test_host_changed(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: L2_AGENT['host']} host2_arg = {portbindings.HOST_ID: L2_AGENT_2['host']} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host2_arg) as port2: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( self.adminContext, agent_id=L2_AGENT['host'], device=device1) p2 = port2['port'] device2 = 'tap' + p2['id'] self.callbacks.update_device_up( self.adminContext, agent_id=L2_AGENT_2['host'], device=device2) data2 = {'port': {'binding:host_id': L2_AGENT_2['host']}} req = self.new_update_request('ports', data2, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['binding:host_id'], L2_AGENT_2['host']) self.mock_fanout.reset_mock() self.callbacks.get_device_details( self.adminContext, device=device1, agent_id=L2_AGENT_2['host']) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p1['mac_address'], p1_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'remove_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected, topic=self.fanout_topic) def test_host_changed_twice(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: L2_AGENT['host']} host2_arg = {portbindings.HOST_ID: L2_AGENT_2['host']} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host2_arg) as port2: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( self.adminContext, agent_id=L2_AGENT['host'], device=device1) p2 = port2['port'] device2 = 'tap' + p2['id'] self.callbacks.update_device_up( self.adminContext, agent_id=L2_AGENT_2['host'], device=device2) data2 = {'port': {'binding:host_id': L2_AGENT_2['host']}} req = self.new_update_request('ports', data2, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['binding:host_id'], L2_AGENT_2['host']) data4 = {'port': {'binding:host_id': L2_AGENT_4['host']}} req = self.new_update_request('ports', data4, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['binding:host_id'], L2_AGENT_4['host']) self.mock_fanout.reset_mock() self.callbacks.get_device_details( self.adminContext, device=device1, agent_id=L2_AGENT_4['host']) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {'args': {'fdb_entries': {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, [p1['mac_address'], p1_ips[0]]]}, 'network_type': 'vxlan', 'segment_id': 1}}}, 'namespace': None, 'method': 'remove_fdb_entries'} self.mock_fanout.assert_called_with( mock.ANY, expected, 
topic=self.fanout_topic)
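# --- Illustrative sketch (not part of the test suite above; the helper name and
# --- defaults are assumptions). Every expected l2pop payload asserted above
# --- shares one shape, which a helper like this could build:
def build_expected_fdb_call(method, network_id, agent_ip, port_entries,
                            network_type='vxlan', segment_id=1):
    """Return the {'args': {'fdb_entries': ...}} dict used in the assertions."""
    return {'args': {'fdb_entries': {
                network_id: {'ports': {agent_ip: port_entries},
                             'network_type': network_type,
                             'segment_id': segment_id}}},
            'namespace': None,
            'method': method}

# Example: build_expected_fdb_call('add_fdb_entries', p1['network_id'],
#                                  '20.0.0.2',
#                                  [constants.FLOODING_ENTRY,
#                                   [p2['mac_address'], p2_ips[0]]])
# reproduces the `expected1` dict asserted in the first test above.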
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import textwrap from contextlib import closing from xml.etree import ElementTree from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform from pants.backend.jvm.targets.jar_library import JarLibrary from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis import ZincAnalysis from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis_parser import ZincAnalysisParser from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.base.hash_utils import hash_file from pants.base.workunit import WorkUnit from pants.java.distribution.distribution import Distribution from pants.java.jar.shader import Shader from pants.option.options import Options from pants.util.contextutil import open_zip from pants.util.dirutil import relativize_paths, safe_open # Well known metadata file required to register scalac plugins with nsc. _PLUGIN_INFO_FILE = 'scalac-plugin.xml' class ZincCompile(JvmCompile): _ZINC_MAIN = 'org.pantsbuild.zinc.Main' _supports_concurrent_execution = True @staticmethod def write_plugin_info(resources_dir, target): root = os.path.join(resources_dir, target.id) plugin_info_file = os.path.join(root, _PLUGIN_INFO_FILE) with safe_open(plugin_info_file, 'w') as f: f.write(textwrap.dedent(""" <plugin> <name>{}</name> <classname>{}</classname> </plugin> """.format(target.plugin, target.classname)).strip()) return root, plugin_info_file @classmethod def global_subsystems(cls): return super(ZincCompile, cls).global_subsystems() + (ScalaPlatform, ) @classmethod def get_args_default(cls, bootstrap_option_values): return ('-S-encoding', '-SUTF-8','-S-g:vars') @classmethod def get_warning_args_default(cls): return ('-S-deprecation', '-S-unchecked') @classmethod def get_no_warning_args_default(cls): return ('-S-nowarn',) @classmethod def register_options(cls, register): super(ZincCompile, cls).register_options(register) register('--plugins', action='append', fingerprint=True, help='Use these scalac plugins.') register('--plugin-args', advanced=True, type=Options.dict, default={}, fingerprint=True, help='Map from plugin name to list of arguments for that plugin.') register('--name-hashing', action='store_true', default=False, fingerprint=True, help='Use zinc name hashing.') cls.register_jvm_tool(register, 'zinc', main=cls._ZINC_MAIN, custom_rules=[ # The compiler-interface and sbt-interface tool jars carry xsbt and # xsbti interfaces that are used across the shaded tool jar boundary so # we preserve these root packages wholesale along with the core scala # APIs. 
Shader.exclude_package('scala', recursive=True), Shader.exclude_package('xsbt', recursive=True), Shader.exclude_package('xsbti', recursive=True), ], fingerprint=True) cls.register_jvm_tool(register, 'compiler-interface', fingerprint=True) cls.register_jvm_tool(register, 'sbt-interface', fingerprint=True) cls.register_jvm_tool(register, 'plugin-jars', default=[], fingerprint=True) def __init__(self, *args, **kwargs): super(ZincCompile, self).__init__(*args, **kwargs) # A directory independent of any other classpath which can contain per-target # plugin resource files. self._plugin_info_dir = os.path.join(self.workdir, 'scalac-plugin-info') self._lazy_plugin_args = None def create_analysis_tools(self): return AnalysisTools(self.context.java_home, ZincAnalysisParser(), ZincAnalysis) def zinc_classpath(self): # Zinc takes advantage of tools.jar if it's presented in classpath. # For example com.sun.tools.javac.Main is used for in process java compilation. def locate_tools_jar(): try: return Distribution.cached(jdk=True).find_libs(['tools.jar']) except Distribution.Error: self.context.log.info('Failed to locate tools.jar. ' 'Install a JDK to increase performance of Zinc.') return [] return self.tool_classpath('zinc') + locate_tools_jar() def compiler_classpath(self): return ScalaPlatform.global_instance().compiler_classpath(self.context.products) def extra_compile_time_classpath_elements(self): # Classpath entries necessary for our compiler plugins. return self.plugin_jars() def plugin_jars(self): """The classpath entries for jars containing code for enabled plugins.""" if self.get_options().plugins: return self.tool_classpath('plugin-jars') else: return [] def plugin_args(self): if self._lazy_plugin_args is None: self._lazy_plugin_args = self._create_plugin_args() return self._lazy_plugin_args def _create_plugin_args(self): if not self.get_options().plugins: return [] plugin_args = self.get_options().plugin_args active_plugins = self._find_plugins() ret = [] for name, jar in active_plugins.items(): ret.append('-S-Xplugin:{}'.format(jar)) for arg in plugin_args.get(name, []): ret.append('-S-P:{}:{}'.format(name, arg)) return ret def _find_plugins(self): """Returns a map from plugin name to plugin jar.""" # Allow multiple flags and also comma-separated values in a single flag. plugin_names = set([p for val in self.get_options().plugins for p in val.split(',')]) plugins = {} buildroot = get_buildroot() for jar in self.plugin_jars(): with open_zip(jar, 'r') as jarfile: try: with closing(jarfile.open(_PLUGIN_INFO_FILE, 'r')) as plugin_info_file: plugin_info = ElementTree.parse(plugin_info_file).getroot() if plugin_info.tag != 'plugin': raise TaskError( 'File {} in {} is not a valid scalac plugin descriptor'.format(_PLUGIN_INFO_FILE, jar)) name = plugin_info.find('name').text if name in plugin_names: if name in plugins: raise TaskError('Plugin {} defined in {} and in {}'.format(name, plugins[name], jar)) # It's important to use relative paths, as the compiler flags get embedded in the zinc # analysis file, and we port those between systems via the artifact cache. 
plugins[name] = os.path.relpath(jar, buildroot) except KeyError: pass unresolved_plugins = plugin_names - set(plugins.keys()) if unresolved_plugins: raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins))) return plugins def extra_products(self, target): """Override extra_products to produce a plugin information file.""" ret = [] if target.is_scalac_plugin and target.classname: # NB: We don't yet support explicit in-line compilation of scala compiler plugins from # the workspace to be used in subsequent compile rounds like we do for annotation processors # with javac. This would require another GroupTask similar to AptCompile, but for scala. root, plugin_info_file = self.write_plugin_info(self._plugin_info_dir, target) ret.append((root, [plugin_info_file])) return ret def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file, log_file): # We add compiler_classpath to ensure the scala-library jar is on the classpath. # TODO: This also adds the compiler jar to the classpath, which compiled code shouldn't # usually need. Be more selective? # TODO(John Sirois): Do we need to do this at all? If adding scala-library to the classpath is # only intended to allow target authors to omit a scala-library dependency, then ScalaLibrary # already overrides traversable_dependency_specs to achieve the same end; arguably at a more # appropriate level and certainly at a more appropriate granularity. relativized_classpath = relativize_paths(self.compiler_classpath() + classpath, get_buildroot()) zinc_args = [] zinc_args.extend([ '-log-level', self.get_options().level, '-analysis-cache', analysis_file, '-classpath', ':'.join(relativized_classpath), '-d', classes_output_dir ]) if not self.get_options().colors: zinc_args.append('-no-color') if not self.get_options().name_hashing: zinc_args.append('-no-name-hashing') if log_file: zinc_args.extend(['-capture-log', log_file]) zinc_args.extend(['-compiler-interface', self.tool_jar('compiler-interface')]) zinc_args.extend(['-sbt-interface', self.tool_jar('sbt-interface')]) zinc_args.extend(['-scala-path', ':'.join(self.compiler_classpath())]) zinc_args += self.plugin_args() if upstream_analysis: zinc_args.extend(['-analysis-map', ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())]) zinc_args += args zinc_args.extend(sources) self.log_zinc_file(analysis_file) if self.runjava(classpath=self.zinc_classpath(), main=self._ZINC_MAIN, jvm_options=self._jvm_options, args=zinc_args, workunit_name='zinc', workunit_labels=[WorkUnit.COMPILER]): raise TaskError('Zinc compile failed.') def log_zinc_file(self, analysis_file): self.context.log.debug('Calling zinc on: {} ({})' .format(analysis_file, hash_file(analysis_file).upper() if os.path.exists(analysis_file) else 'nonexistent')) class ScalaZincCompile(ZincCompile): _language = 'scala' _file_suffix = '.scala' class JavaZincCompile(ZincCompile): _language = 'java' _file_suffix = '.java' @classmethod def get_args_default(cls, bootstrap_option_values): return super(JavaZincCompile, cls).get_args_default(bootstrap_option_values) + ('-java-only',) @classmethod def name(cls): # Use a different name from 'java' so options from JMake version won't interfere. 
return "zinc-java" @classmethod def register_options(cls, register): super(JavaZincCompile, cls).register_options(register) register('--enabled', action='store_true', default=False, help='Use zinc to compile Java targets') def select(self, target): return self.get_options().enabled and super(JavaZincCompile, self).select(target)
#!/usr/bin/python import os import unittest import mailerlite try: API_KEY = os.environ['PYML_TEST_API_KEY'] except KeyError as e: print('You need to configure a PYML_TEST_API_KEY in your env variables. ' '{0}'.format(e)) class ApiTest(unittest.TestCase): @classmethod def setUpClass(cls): cls._api = mailerlite.Api(API_KEY) # Check if any campaigns exist and grab one to test against. try: # If the all_campaigns method is busted it is still tested, # so this failing for that reason will be reported later. cls._all_campaigns = cls._api.all_campaigns() cls._campaign_id = cls._all_campaigns['Results'][0]['id'] except: cls._campaign_id = None print('No campaigns found, skipping tests that require one.') # # Try to make a list for testing against. # try: # cls._test_list = cls._api.create_list( # 'pyml_test_list_{0}'.format(random.randint(100000, 999999)) # ) # cls._test_list_id = cls._test_list['id'] # except: # cls._test_list_id = None # print('Something went wrong making a new list, skipping tests ' # 'that require a test list.') print 'Running integration tests on the API class.' def test_all_campaigns(self): response = self._api.all_campaigns() expected = ('Page', 'Limit', 'RecordsOnPage', 'Results') self.assertItemsEqual(response, expected) def test_campaign_details(self): if self._campaign_id is None: self.skipTest('No campaigns found to test campaign_details.') response = self._api.campaign_details(self._campaign_id) expected = ( 'unsubscribes', 'uniqueOpens', 'url', 'bounces', 'junk', 'clicks', 'started', 'done', 'total', 'id', 'opens', 'subject' ) self.assertItemsEqual(response, expected) def test_campaign_recipients(self): if self._campaign_id is None: self.skipTest('No campaigns found to test campaign_recipients.') response = self._api.campaign_recipients(self._campaign_id) expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_campaign_opens(self): if self._campaign_id is None: self.skipTest('No campaigns found to test campaign_opens.') response = self._api.campaign_opens(self._campaign_id) expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_campaign_clicks(self): if self._campaign_id is None: self.skipTest('No campaigns found to test campaign_clicks.') response = self._api.campaign_clicks(self._campaign_id) expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_campaign_unsubscribes(self): if self._campaign_id is None: self.skipTest('No campaigns found to test campaign_unsubscribes.') response = self._api.campaign_unsubscribes(self._campaign_id) expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_campaign_bounces(self): if self._campaign_id is None: self.skipTest('No campaigns found to test campaign_bounces.') response = self._api.campaign_bounces(self._campaign_id) expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_campaign_spam_complaints(self): if self._campaign_id is None: self.skipTest( 'No campaigns found to test campaign_spam_complaints.' 
) response = self._api.campaign_spam_complaints(self._campaign_id) expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_all_lists(self): response = self._api.all_lists() expected = ( 'RecordsOnPage', 'Limit', 'Results', 'Page' ) self.assertItemsEqual(response, expected) def test_create_list(self): response = self._api.create_list('pyml_test_list') expected = ('id', 'name') try: self._test_list_id = response['id'] except: self._test_list_id = None print( 'Something went wrong making a new list, skipping tests ' 'that require a test list.' ) self.assertItemsEqual(response, expected) def test_list_details(self): if self._test_list_id is None: self.skipTest( 'No test list found to test list_details.' ) response = self._api.list_details(self._test_list_id) expected = ( 'updated', 'bounced', 'name', 'unsubscribed', 'date', 'total', 'id' ) self.assertItemsEqual(response, expected) def test_update_list(self): if self._test_list_id is None: self.skipTest( 'No test list found to test update_list.' ) response = self._api.update_list(self._test_list_id, 'updated_name') expected = {u'id': 2186293, u'name': u'updated_name'} self.assertEqual(response, expected) def test_delete_list(self): if self._test_list_id is None: self.skipTest( 'No test list found to test delete_list.' ) response = self._api.delete_list(self._test_list_id) expected = ( [] ) self.assertEquals(response, expected)
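# --- Usage sketch (assumptions: the module layout and test runner invocation
# --- below are examples, not stated above).
#
#   $ export PYML_TEST_API_KEY=<your key>
#   $ python -m unittest discover
#
# The paged-endpoint tests all assert the same response envelope, which could
# be factored into a single helper such as:
PAGED_RESPONSE_KEYS = ('Page', 'Limit', 'RecordsOnPage', 'Results')

def assert_paged_response(test_case, response):
    """Check a paged MailerLite response exposes the standard top-level keys."""
    test_case.assertItemsEqual(response, PAGED_RESPONSE_KEYS)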
#!/usr/bin/env python from __future__ import with_statement import argparse import sys import logging import urllib, urllib2 import json from fabric.operations import local from fabric.api import hide import yaml VERSION = "0.0.1" SERVER_FILE = ".server" logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) def get_repo_info(): with hide('commands'): f_out = local('git remote -v|grep push|grep origin', capture = True) remote_git = "" start = f_out.find("http") end = f_out.find(".git") remote_git = f_out[start:end] repo_name = remote_git[remote_git.rfind('/')+1:] return repo_name def get_current_branch(): with hide('commands'): f_out = local('git branch', capture = True) start = f_out.find('* ') end = f_out.find('\n') branch = f_out[start+2:end] return branch def get_last_hash(): with hide('commands'): f_out = local('git rev-parse HEAD', capture = True) start = 0 end = f_out.find('\n') branch = f_out[start:end] return branch class Server(object): def __init__(self): try: with open(".server") as f: self.address = f.readlines()[0] self.repo = get_repo_info() self.current_branch = get_current_branch() ok = self.post_to_server('info') logging.debug("endpoint: %s" % (ok)) except IOError: self.address = None def parse_yaml(self,yaml_file): try: data = yaml.load(yaml_file.read()) if data is not None: return data return False except Exception as e: logging.error(e) return False """ Run a normal client deployment """ def deploy(self, git_hash = None): if git_hash is None: git_hash = get_last_hash() deploy = {'hash': git_hash, 'branch': get_current_branch()} req = self.post_to_server("deploy", deploy) result = json.loads(req) self.parse_server_response(result) def parse_server_response(self,result): if result['status'] == "ok": print result['msg'] else: logging.error(result) print ("Error occured: %s" % (result['msg'])) sys.exit() """" Sends a new init configuration for deployment on a branch and current repo """ def init_config(self, config_file): conf = {'conf':self.parse_yaml(config_file)} if not conf['conf']: print "Your config file could not be parsed" sys.exit() req = self.post_to_server("init.config", conf) result = json.loads(req) self.parse_server_response(result) """ Creates the base url for the api """ def get_base_url(self, command = None): return { 'info': 'http://%s' % (self.address), 'init.config': 'http://%s/api/%s/init/' % (self.address, self.repo), 'deploy': 'http://%s/api/%s/deploy/' % (self.address, self.repo), }.get(command, 'http://%s/api/%s' % (self.address, self.repo)) """ Post requests to deploy server """ def post_to_server(self, command = None, data_dict = None): if self.address is not None: url_2 = self.get_base_url(command) if data_dict is not None: logging.debug("sending post data: %s to: %s" % (data_dict, url_2)) data = urllib.urlencode(data_dict) req = urllib2.Request(url_2, data) try: rsp = urllib2.urlopen(req) except urllib2.URLError, e: logging.error("Error 2: couldn't communicate with the server on: %s" % (url_2)) sys.exit() else: req = urllib2.Request(url_2) try: logging.debug("executing get on: %s" % (url_2)) rsp = urllib2.urlopen(req) except urllib2.URLError, e: logging.error("Error 3: couldn't communicate with the server on: %s" % (url_2)) sys.exit() return rsp.read() else: logging.error("Error 4: Can't comunicate with the server") sys.exit() class DeployAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): logging.debug('DeployAction %r %r %r' % (namespace, values, option_string)) setattr(namespace, self.dest, 
values) if values is None: server.deploy() else: server.deploy(values) """ This will read a local config yaml which will be sent to the server If the server will have this repo and branch already configured an error will be trigered. This method can't be used to overwrite config data """ class InitAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): logging.debug('%r %r %r' % (namespace, values, option_string)) setattr(namespace, self.dest, values) server.init_config(values) # TODO verify with the server if exists already an initiated config for this repo # if exists an error will be displayed class SetupAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): logging.debug('%r %r %r' % (namespace, values, option_string)) setattr(namespace, self.dest, values) server = values # write hidden file with the server address f = open(SERVER_FILE,'w') f.write('%s' %(server)) # python will convert \n to os.linesep f.close() server = Server() parser = argparse.ArgumentParser(description = 'Nursery deplkoy system') parser.add_argument('-v','--version', action = 'version', version = '%(prog)s '+VERSION) parser.add_argument('-s','--setup', nargs='?', metavar='Server', action = SetupAction,help = 'setup a nursery deploy system, you need to specify the nursery server endpoint like: http://www.my-nursery-server.com') # each branch needs it's own config file parser.add_argument('-c','--config', metavar='config.yaml', action = InitAction, type = file,help = 'init a new repo deployment with config file you specify') parser.add_argument('-d','--deploy',nargs='?', metavar='hash', action = DeployAction, type = file,help = 'create a new async deploy') parser.add_argument('-i','--info', action='store_true', help = 'some info Nursery Client knows about') if not len(sys.argv) > 1: parser.print_help() else: args = parser.parse_args() logging.debug(args) if args.info: if server.address is not None: print ("remote deploy server: %s" % server.address) print ("repo: %s" % server.repo) print ("branch: %s" % server.current_branch) # comication with the server - done # setup server (with amazon credentials & stuff) # initialize branch deploy with deploy server # read config yaml and send it to the server - file sent - ok # read the response and show it - ok # read the file on the server - ok #TODO # on the server store the git deploy command so it can be processed assync # 3 way to deploy git, client, forced # - client # client -> git deploy (last hash) -> ok # store in db the command if allow_multiple_deploy & stuff # parse the command assync # build file list # get instances # get scripts # make the deployment # on the server we need to modelate this yaml file to the db # find a good way to insert instances in db # filter a deployment based on touced files # make a deployment
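# --- Usage sketch (assumption: the client above is saved as nursery.py; the
# --- server address is only an example).
#
#   $ python nursery.py --setup my-nursery.example.com   # writes the .server file
#   $ python nursery.py --config config.yaml             # register a branch config
#   $ python nursery.py --deploy                         # deploy the latest commit
#   $ python nursery.py --deploy <git-hash>              # deploy a specific commit
#
# Server.deploy() posts a form-encoded body shaped like
#   {'hash': '<git sha>', 'branch': '<current branch>'}
# to http://<server>/api/<repo>/deploy/ (see get_base_url above).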
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Pedro Navarro Perez # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Windows Server 2012 This driver requires ISCSI target role installed """ import os import sys from oslo.config import cfg from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.volume import driver # Check needed for unit testing on Unix if os.name == 'nt': import wmi LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS windows_opts = [ cfg.StrOpt('windows_iscsi_lun_path', default='C:\iSCSIVirtualDisks', help='Path to store VHD backed volumes'), ] FLAGS.register_opts(windows_opts) class WindowsDriver(driver.ISCSIDriver): """Executes volume driver commands on Windows Storage server.""" def __init__(self, *args, **kwargs): super(WindowsDriver, self).__init__(*args, **kwargs) def do_setup(self, context): """Setup the Windows Volume driver. Called one time by the manager after the driver is loaded. Validate the flags we care about """ #Set the flags self._conn_wmi = wmi.WMI(moniker='//./root/wmi') self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') def check_for_setup_error(self): """Check that the driver is working and can communicate. """ #Invoking the portal an checking that is listening wt_portal = self._conn_wmi.WT_Portal()[0] listen = wt_portal.Listen if not listen: raise exception.VolumeBackendAPIException() def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance. """ initiator_name = connector['initiator'] target_name = volume['provider_location'] cl = self._conn_wmi.__getattr__("WT_IDMethod") wt_idmethod = cl.new() wt_idmethod.HostName = target_name wt_idmethod.Method = 4 wt_idmethod.Value = initiator_name wt_idmethod.put() #Getting the portal and port information wt_portal = self._conn_wmi.WT_Portal()[0] (address, port) = (wt_portal.Address, wt_portal.Port) #Getting the host information hosts = self._conn_wmi.WT_Host(Hostname=target_name) host = hosts[0] properties = {} properties['target_discovered'] = False properties['target_portal'] = '%s:%s' % (address, port) properties['target_iqn'] = host.TargetIQN properties['target_lun'] = 0 properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret return { 'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance. Unmask the LUN on the storage system so the given intiator can no longer access it. 
""" initiator_name = connector['initiator'] provider_location = volume['provider_location'] #DesAssigning target to initiators wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=provider_location, Method=4, Value=initiator_name)[0] wt_idmethod.Delete_() def create_volume(self, volume): """Driver entry point for creating a new volume.""" vhd_path = self._get_vhd_path(volume) vol_name = volume['name'] #The WMI procedure returns a Generic failure cl = self._conn_wmi.__getattr__("WT_Disk") cl.NewWTDisk(DevicePath=vhd_path, Description=vol_name, SizeInMB=volume['size'] * 1024) def _get_vhd_path(self, volume): base_vhd_folder = FLAGS.windows_iscsi_lun_path if not os.path.exists(base_vhd_folder): LOG.debug(_('Creating folder %s '), base_vhd_folder) os.makedirs(base_vhd_folder) return os.path.join(base_vhd_folder, str(volume['name']) + ".vhd") def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" vol_name = volume['name'] wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0] wt_disk.Delete_() vhdfiles = self._conn_cimv2.query( "Select * from CIM_DataFile where Name = '" + self._get_vhd_path(volume) + "'") if len(vhdfiles) > 0: vhdfiles[0].Delete() def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot. """ #Getting WT_Snapshot class vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0] #API Calls gets Generic Failure cl = self._conn_wmi.__getattr__("WT_Snapshot") disk_id = wt_disk.WTD out = cl.Create(WTD=disk_id) #Setting description since it used as a KEY wt_snapshot_created = self._conn_wmi.WT_Snapshot(Id=out[0])[0] wt_snapshot_created.Description = snapshot_name wt_snapshot_created.put() def create_volume_from_snapshot(self, volume, snapshot): """Driver entry point for exporting snapshots as volumes.""" snapshot_name = snapshot['name'] wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snapshot_name)[0] disk_id = wt_snapshot.Export()[0] wt_disk = self._conn_wmi.WT_Disk(WTD=disk_id)[0] wt_disk.Description = volume['name'] wt_disk.put() def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" snapshot_name = snapshot['name'] wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snapshot_name)[0] wt_snapshot.Delete_() def _do_export(self, _ctx, volume, ensure=False): """Do all steps to get disk exported as LUN 0 at separate target. 
:param volume: reference of volume to be exported :param ensure: if True, ignore errors caused by already existing resources :return: iscsiadm-formatted provider location string """ target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) #ISCSI target creation try: cl = self._conn_wmi.__getattr__("WT_Host") cl.NewHost(HostName=target_name) except Exception as exc: excep_info = exc.com_error.excepinfo[2] if not ensure or excep_info.find(u'The file exists') == -1: raise else: LOG.info(_('Ignored target creation error "%s"' ' while ensuring export'), exc) #Get the disk to add vol_name = volume['name'] q = self._conn_wmi.WT_Disk(Description=vol_name) if not len(q): LOG.debug(_('Disk not found: %s'), vol_name) return None wt_disk = q[0] wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0] wt_host.AddWTDisk(wt_disk.WTD) return target_name def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" self._do_export(context, volume, ensure=True) def create_export(self, context, volume): """Driver entry point to get the export info for a new volume.""" loc = self._do_export(context, volume, ensure=False) return {'provider_location': loc} def remove_export(self, context, volume): """Driver exntry point to remove an export for a volume. """ target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) #Get ISCSI target wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0] wt_host.RemoveAllWTDisks() wt_host.Delete_() def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" raise NotImplementedError() def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" raise NotImplementedError()
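# --- Illustration (assumption; the values are examples, not produced by the
# --- code above). initialize_connection() returns iSCSI connection info
# --- shaped like this:
EXAMPLE_CONNECTION_INFO = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_discovered': False,
        'target_portal': '192.168.10.5:3260',                # WT_Portal Address:Port
        'target_iqn': 'iqn.1991-05.com.microsoft:sv-target',  # WT_Host TargetIQN
        'target_lun': 0,
        'volume_id': 'volume-uuid',
        # 'auth_method'/'auth_username'/'auth_password' are added only when
        # the volume carries provider_auth.
    },
}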
#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded TCP server.""" from __future__ import absolute_import, division, print_function import errno import os import socket from tornado import gen from tornado.log import app_log from tornado.ioloop import IOLoop from tornado.iostream import IOStream, SSLIOStream from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket from tornado import process from tornado.util import errno_from_exception try: import ssl except ImportError: # ssl is not available on Google App Engine. ssl = None class TCPServer(object): r"""A non-blocking, single-threaded TCP server. To use `TCPServer`, define a subclass which overrides the `handle_stream` method. For example, a simple echo server could be defined like this:: from tornado.tcpserver import TCPServer from tornado.iostream import StreamClosedError from tornado import gen class EchoServer(TCPServer): @gen.coroutine def handle_stream(self, stream, address): while True: try: data = yield stream.read_until(b"\n") yield stream.write(data) except StreamClosedError: break To make this server serve SSL traffic, send the ``ssl_options`` keyword argument with an `ssl.SSLContext` object. For compatibility with older versions of Python ``ssl_options`` may also be a dictionary of keyword arguments for the `ssl.wrap_socket` method.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), os.path.join(data_dir, "mydomain.key")) TCPServer(ssl_options=ssl_ctx) `TCPServer` initialization follows one of three patterns: 1. `listen`: simple single-process:: server = TCPServer() server.listen(8888) IOLoop.current().start() 2. `bind`/`start`: simple multi-process:: server = TCPServer() server.bind(8888) server.start(0) # Forks multiple sub-processes IOLoop.current().start() When using this interface, an `.IOLoop` must *not* be passed to the `TCPServer` constructor. `start` will always start the server on the default singleton `.IOLoop`. 3. `add_sockets`: advanced multi-process:: sockets = bind_sockets(8888) tornado.process.fork_processes(0) server = TCPServer() server.add_sockets(sockets) IOLoop.current().start() The `add_sockets` interface is more complicated, but it can be used with `tornado.process.fork_processes` to give you more flexibility in when the fork happens. `add_sockets` can also be used in single-process servers if you want to create your listening sockets in some way other than `~tornado.netutil.bind_sockets`. .. versionadded:: 3.1 The ``max_buffer_size`` argument. .. versionchanged:: 5.0 The ``io_loop`` argument has been removed. 
""" def __init__(self, ssl_options=None, max_buffer_size=None, read_chunk_size=None): self.io_loop = IOLoop.current() self.ssl_options = ssl_options self._sockets = {} # fd -> socket object self._pending_sockets = [] self._started = False self._stopped = False self.max_buffer_size = max_buffer_size self.read_chunk_size = read_chunk_size # Verify the SSL options. Otherwise we don't get errors until clients # connect. This doesn't verify that the keys are legitimate, but # the SSL module doesn't do that until there is a connected socket # which seems like too much work if self.ssl_options is not None and isinstance(self.ssl_options, dict): # Only certfile is required: it can contain both keys if 'certfile' not in self.ssl_options: raise KeyError('missing key "certfile" in ssl_options') if not os.path.exists(self.ssl_options['certfile']): raise ValueError('certfile "%s" does not exist' % self.ssl_options['certfile']) if ('keyfile' in self.ssl_options and not os.path.exists(self.ssl_options['keyfile'])): raise ValueError('keyfile "%s" does not exist' % self.ssl_options['keyfile']) def listen(self, port, address=""): """Starts accepting connections on the given port. This method may be called more than once to listen on multiple ports. `listen` takes effect immediately; it is not necessary to call `TCPServer.start` afterwards. It is, however, necessary to start the `.IOLoop`. """ sockets = bind_sockets(port, address=address) self.add_sockets(sockets) def add_sockets(self, sockets): """Makes this server start accepting connections on the given sockets. The ``sockets`` parameter is a list of socket objects such as those returned by `~tornado.netutil.bind_sockets`. `add_sockets` is typically used in combination with that method and `tornado.process.fork_processes` to provide greater control over the initialization of a multi-process server. """ for sock in sockets: self._sockets[sock.fileno()] = sock add_accept_handler(sock, self._handle_connection) def add_socket(self, socket): """Singular version of `add_sockets`. Takes a single socket object.""" self.add_sockets([socket]) def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128, reuse_port=False): """Binds this server to the given port on the given address. To start the server, call `start`. If you want to run this server in a single process, you can call `listen` as a shortcut to the sequence of `bind` and `start` calls. Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument has the same meaning as for `.bind_sockets`. This method may be called multiple times prior to `start` to listen on multiple ports or interfaces. .. versionchanged:: 4.4 Added the ``reuse_port`` argument. """ sockets = bind_sockets(port, address=address, family=family, backlog=backlog, reuse_port=reuse_port) if self._started: self.add_sockets(sockets) else: self._pending_sockets.extend(sockets) def start(self, num_processes=1): """Starts this server in the `.IOLoop`. By default, we run the server in this process and do not fork any additional child process. 
If num_processes is ``None`` or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If num_processes is given and > 1, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``TCPServer.start(n)``. """ assert not self._started self._started = True if num_processes != 1: process.fork_processes(num_processes) sockets = self._pending_sockets self._pending_sockets = [] self.add_sockets(sockets) def stop(self): """Stops listening for new connections. Requests currently in progress may still continue after the server is stopped. """ if self._stopped: return self._stopped = True for fd, sock in self._sockets.items(): assert sock.fileno() == fd self.io_loop.remove_handler(fd) sock.close() def handle_stream(self, stream, address): """Override to handle a new `.IOStream` from an incoming connection. This method may be a coroutine; if so any exceptions it raises asynchronously will be logged. Accepting of incoming connections will not be blocked by this coroutine. If this `TCPServer` is configured for SSL, ``handle_stream`` may be called before the SSL handshake has completed. Use `.SSLIOStream.wait_for_handshake` if you need to verify the client's certificate or use NPN/ALPN. .. versionchanged:: 4.2 Added the option for this method to be a coroutine. """ raise NotImplementedError() def _handle_connection(self, connection, address): if self.ssl_options is not None: assert ssl, "Python 2.6+ and OpenSSL required for SSL" try: connection = ssl_wrap_socket(connection, self.ssl_options, server_side=True, do_handshake_on_connect=False) except ssl.SSLError as err: if err.args[0] == ssl.SSL_ERROR_EOF: return connection.close() else: raise except socket.error as err: # If the connection is closed immediately after it is created # (as in a port scan), we can get one of several errors. # wrap_socket makes an internal call to getpeername, # which may return either EINVAL (Mac OS X) or ENOTCONN # (Linux). If it returns ENOTCONN, this error is # silently swallowed by the ssl module, so we need to # catch another error later on (AttributeError in # SSLIOStream._do_ssl_handshake). # To test this behavior, try nmap with the -sT flag. # https://github.com/tornadoweb/tornado/pull/750 if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL): return connection.close() else: raise try: if self.ssl_options is not None: stream = SSLIOStream(connection, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size) else: stream = IOStream(connection, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size) future = self.handle_stream(stream, address) if future is not None: self.io_loop.add_future(gen.convert_yielded(future), lambda f: f.result()) except Exception: app_log.error("Error in connection callback", exc_info=True)
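# --- Minimal usage sketch (assumption; not part of tornado itself). Subclass
# --- TCPServer and override handle_stream(), as the class docstring describes.
if __name__ == '__main__':
    from tornado.iostream import StreamClosedError

    class EchoOnceServer(TCPServer):
        @gen.coroutine
        def handle_stream(self, stream, address):
            # Echo one line back to the client, then close the connection.
            try:
                data = yield stream.read_until(b"\n")
                yield stream.write(data)
            except StreamClosedError:
                pass
            finally:
                stream.close()

    EchoOnceServer().listen(8888)
    IOLoop.current().start()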
# Copyright (c) 2016-2017, Neil Booth # # All rights reserved. # # See the file "LICENCE" for information about the copyright # and warranty status of this software. '''Classes for local RPC server and remote client TCP/SSL servers.''' import codecs import time from functools import partial from lib.hash import sha256, hash_to_str from lib.jsonrpc import JSONSession, RPCError, JSONRPCv2, JSONRPC from server.daemon import DaemonError import server.version as version class SessionBase(JSONSession): '''Base class of ElectrumX JSON sessions. Each session runs its tasks in asynchronous parallelism with other sessions. ''' def __init__(self, controller, kind): # Force v2 as a temporary hack for old Coinomi wallets # Remove in April 2017 super().__init__(version=JSONRPCv2) self.kind = kind # 'RPC', 'TCP' etc. self.controller = controller self.bp = controller.bp self.env = controller.env self.daemon = self.bp.daemon self.client = 'unknown' self.client_version = (1) self.protocol_version = '1.0' self.anon_logs = self.env.anon_logs self.last_delay = 0 self.txs_sent = 0 self.requests = [] self.start_time = time.time() self.close_time = 0 self.bw_limit = self.env.bandwidth_limit self.bw_time = self.start_time self.bw_interval = 3600 self.bw_used = 0 def close_connection(self): '''Call this to close the connection.''' self.close_time = time.time() super().close_connection() def peername(self, *, for_log=True): '''Return the peer address and port.''' return self.peer_addr(anon=for_log and self.anon_logs) def flags(self): '''Status flags.''' status = self.kind[0] if self.is_closing(): status += 'C' if self.log_me: status += 'L' status += str(self.controller.session_priority(self)) return status def connection_made(self, transport): '''Handle an incoming client connection.''' super().connection_made(transport) self.controller.add_session(self) def connection_lost(self, exc): '''Handle client disconnection.''' super().connection_lost(exc) msg = '' if self.pause: msg += ' whilst paused' if self.controller.is_deprioritized(self): msg += ' whilst deprioritized' if self.send_size >= 1024*1024: msg += ('. 
Sent {:,d} bytes in {:,d} messages' .format(self.send_size, self.send_count)) if msg: msg = 'disconnected' + msg self.log_info(msg) self.controller.remove_session(self) def using_bandwidth(self, amount): now = time.time() # Reduce the recorded usage in proportion to the elapsed time elapsed = now - self.bw_time self.bandwidth_start = now refund = int(elapsed / self.bw_interval * self.bw_limit) refund = min(refund, self.bw_used) self.bw_used += amount - refund def sub_count(self): return 0 class ElectrumX(SessionBase): '''A TCP server that handles incoming Electrum connections.''' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.subscribe_headers = False self.subscribe_height = False self.notified_height = None self.max_send = self.env.max_send self.max_subs = self.env.max_session_subs self.hashX_subs = {} self.mempool_statuses = {} self.chunk_indices = [] self.electrumx_handlers = { 'blockchain.address.subscribe': self.address_subscribe, 'blockchain.block.get_chunk': self.block_get_chunk, 'blockchain.headers.subscribe': self.headers_subscribe, 'blockchain.numblocks.subscribe': self.numblocks_subscribe, 'blockchain.script_hash.subscribe': self.script_hash_subscribe, 'blockchain.transaction.broadcast': self.transaction_broadcast, 'server.add_peer': self.add_peer, 'server.banner': self.banner, 'server.features': self.server_features, 'server.peers.subscribe': self.peers_subscribe, 'server.version': self.server_version, } def sub_count(self): return len(self.hashX_subs) async def notify(self, height, touched): '''Notify the client about changes in height and touched addresses. Cache is a shared cache for this update. ''' pairs = [] changed = [] matches = touched.intersection(self.hashX_subs) for hashX in matches: alias = self.hashX_subs[hashX] status = await self.address_status(hashX) changed.append((alias, status)) if height != self.notified_height: self.notified_height = height if self.subscribe_headers: args = (self.controller.electrum_header(height), ) pairs.append(('blockchain.headers.subscribe', args)) if self.subscribe_height: pairs.append(('blockchain.numblocks.subscribe', (height, ))) # Check mempool hashXs - the status is a function of the # confirmed state of other transactions for hashX in set(self.mempool_statuses).difference(matches): old_status = self.mempool_statuses[hashX] status = await self.address_status(hashX) if status != old_status: alias = self.hashX_subs[hashX] changed.append((alias, status)) for alias_status in changed: if len(alias_status[0]) == 64: method = 'blockchain.script_hash.subscribe' else: method = 'blockchain.address.subscribe' pairs.append((method, alias_status)) if pairs: self.send_notifications(pairs) if changed: es = '' if len(changed) == 1 else 'es' self.log_info('notified of {:,d} address{}' .format(len(changed), es)) def height(self): '''Return the current flushed database height.''' return self.bp.db_height def current_electrum_header(self): '''Used as response to a headers subscription request.''' return self.controller.electrum_header(self.height()) def headers_subscribe(self): '''Subscribe to get headers of new blocks.''' self.subscribe_headers = True return self.current_electrum_header() def numblocks_subscribe(self): '''Subscribe to get height of new blocks.''' self.subscribe_height = True return self.height() async def add_peer(self, features): '''Add a peer (but only if the peer resolves to the source).''' peer_mgr = self.controller.peer_mgr return await peer_mgr.on_add_peer(features, self.peer_info()) def 
peers_subscribe(self): '''Return the server peers as a list of (ip, host, details) tuples.''' return self.controller.peer_mgr.on_peers_subscribe(self.is_tor()) async def address_status(self, hashX): '''Returns an address status. Status is a hex string, but must be None if there is no history. ''' # Note history is ordered and mempool unordered in electrum-server # For mempool, height is -1 if unconfirmed txins, otherwise 0 history = await self.controller.get_history(hashX) mempool = await self.controller.mempool_transactions(hashX) status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height) for tx_hash, height in history) status += ''.join('{}:{:d}:'.format(hex_hash, -unconfirmed) for hex_hash, tx_fee, unconfirmed in mempool) if status: status = sha256(status.encode()).hex() else: status = None if mempool: self.mempool_statuses[hashX] = status else: self.mempool_statuses.pop(hashX, None) return status async def hashX_subscribe(self, hashX, alias): # First check our limit. if len(self.hashX_subs) >= self.max_subs: raise RPCError('your address subscription limit {:,d} reached' .format(self.max_subs)) # Now let the controller check its limit self.controller.new_subscription() self.hashX_subs[hashX] = alias return await self.address_status(hashX) async def address_subscribe(self, address): '''Subscribe to an address. address: the address to subscribe to''' hashX = self.controller.address_to_hashX(address) return await self.hashX_subscribe(hashX, address) async def script_hash_subscribe(self, script_hash): '''Subscribe to a script hash. script_hash: the SHA256 hash of the script to subscribe to''' hashX = self.controller.script_hash_to_hashX(script_hash) return await self.hashX_subscribe(hashX, script_hash) def server_features(self): '''Returns a dictionary of server features.''' return self.controller.peer_mgr.my_clearnet_peer().features def block_get_chunk(self, index): '''Return a chunk of block headers as a hexadecimal string. index: the chunk index''' index = self.controller.non_negative_integer(index) if self.client_version < (2, 8, 3): self.chunk_indices.append(index) self.chunk_indices = self.chunk_indices[-5:] # -2 allows backing up a single chunk but no more. if index <= max(self.chunk_indices[:-2], default=-1): msg = ('chunk indices not advancing (wrong network?): {}' .format(self.chunk_indices)) # use INVALID_REQUEST to trigger a disconnect raise RPCError(msg, JSONRPC.INVALID_REQUEST) return self.controller.get_chunk(index) def is_tor(self): '''Try to detect if the connection is to a tor hidden service we are running.''' proxy = self.controller.peer_mgr.proxy peer_info = self.peer_info() return peer_info and peer_info[0] == proxy.ip_addr async def replaced_banner(self, banner): network_info = await self.controller.daemon_request('getnetworkinfo') ni_version = network_info['version'] major, minor = divmod(ni_version, 1000000) minor, revision = divmod(minor, 10000) revision //= 100 daemon_version = '{:d}.{:d}.{:d}'.format(major, minor, revision) for pair in [ ('$VERSION', version.VERSION), ('$DAEMON_VERSION', daemon_version), ('$DAEMON_SUBVERSION', network_info['subversion']), ('$DONATION_ADDRESS', self.env.donation_address), ]: banner = banner.replace(*pair) return banner async def banner(self): '''Return the server banner text.''' banner = 'Welcome to Electrum!' 
if self.is_tor(): banner_file = self.env.tor_banner_file else: banner_file = self.env.banner_file if banner_file: try: with codecs.open(banner_file, 'r', 'utf-8') as f: banner = f.read() except Exception as e: self.log_error('reading banner file {}: {}' .format(banner_file, e)) else: banner = await self.replaced_banner(banner) return banner def server_version(self, client_name=None, protocol_version=None): '''Returns the server version as a string. client_name: a string identifying the client protocol_version: the protocol version spoken by the client ''' if client_name: self.client = str(client_name)[:17] try: self.client_version = tuple(int(part) for part in self.client.split('.')) except Exception: pass if protocol_version is not None: self.protocol_version = protocol_version return version.VERSION async def transaction_broadcast(self, raw_tx): '''Broadcast a raw transaction to the network. raw_tx: the raw transaction as a hexadecimal string''' # An ugly API: current Electrum clients only pass the raw # transaction in hex and expect error messages to be returned in # the result field. And the server shouldn't be doing the client's # user interface job here. try: tx_hash = await self.daemon.sendrawtransaction([raw_tx]) self.txs_sent += 1 self.log_info('sent tx: {}'.format(tx_hash)) self.controller.sent_tx(tx_hash) return tx_hash except DaemonError as e: error = e.args[0] message = error['message'] self.log_info('sendrawtransaction: {}'.format(message), throttle=True) if 'non-mandatory-script-verify-flag' in message: return ( 'Your client produced a transaction that is not accepted ' 'by the network any more. Please upgrade to Electrum ' '2.5.1 or newer.' ) return ( 'The transaction was rejected by network rules. ({})\n[{}]' .format(message, raw_tx) ) def request_handler(self, method): '''Return the async handler for the given request method.''' handler = self.electrumx_handlers.get(method) if not handler: handler = self.controller.electrumx_handlers.get(method) return handler class LocalRPC(SessionBase): '''A local TCP RPC server session.''' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.client = 'RPC' self.max_send = 0 def request_handler(self, method): '''Return the async handler for the given request method.''' return self.controller.rpc_handlers.get(method)
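# --- Standalone illustration (assumption: simplified, uses hashlib directly
# --- rather than lib.hash). address_status() above builds the status string
# --- from 'tx_hash:height:' pairs and hashes it, roughly:
import hashlib as _hashlib

def _example_status(history):
    '''history: iterable of (tx_hash_hex, height) pairs, confirmed history
    first, mempool entries last.'''
    s = ''.join('{}:{:d}:'.format(tx_hash, height)
                for tx_hash, height in history)
    return _hashlib.sha256(s.encode()).hexdigest() if s else None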
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sahara.plugins.mapr.util.func_utils as fu
import sahara.tests.unit.base as b


class PredicatesTest(b.SaharaTestCase):
    def test_true_predicate(self):
        self.assertTrue(fu.true_predicate(None))

    def test_false_predicate(self):
        self.assertFalse(fu.false_predicate(None))

    def test_not_predicate(self):
        self.assertFalse(fu.not_predicate(fu.true_predicate)(None))
        self.assertTrue(fu.not_predicate(fu.false_predicate)(None))

    def test_and_predicate(self):
        true_p = fu.true_predicate
        false_p = fu.false_predicate
        and_p = fu.and_predicate
        self.assertTrue(and_p(true_p, true_p)(None))
        self.assertFalse(and_p(false_p, true_p)(None))
        self.assertFalse(and_p(true_p, false_p)(None))
        self.assertFalse(and_p(false_p, false_p)(None))

    def test_or_predicate(self):
        true_p = fu.true_predicate
        false_p = fu.false_predicate
        or_p = fu.or_predicate
        self.assertTrue(or_p(true_p, true_p)(None))
        self.assertTrue(or_p(false_p, true_p)(None))
        self.assertTrue(or_p(true_p, false_p)(None))
        self.assertFalse(or_p(false_p, false_p)(None))

    def test_field_equals_predicate(self):
        field_equals_p = fu.field_equals_predicate
        arg = {'a': 'a', 'b': 'b'}
        self.assertTrue(field_equals_p('a', 'a')(arg))
        self.assertFalse(field_equals_p('b', 'a')(arg))

    def test_like_predicate(self):
        like_p = fu.like_predicate
        arg = {'a': 'a', 'b': 'b', 'c': 'c'}
        self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'c'})(arg))
        self.assertTrue(like_p({'a': 'a', 'b': 'b'})(arg))
        self.assertTrue(like_p({'a': 'a'})(arg))
        self.assertTrue(like_p({'a': 'a'}, ['a'])(arg))
        self.assertTrue(like_p({})(arg))
        self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'a'}, ['c'])(arg))
        self.assertFalse(like_p({'a': 'a', 'b': 'b', 'c': 'a'})(arg))
        self.assertFalse(like_p({'a': 'a', 'c': 'a'})(arg))
        self.assertFalse(like_p({'c': 'a'}, ['a'])(arg))

    def test_in_predicate(self):
        in_p = fu.in_predicate
        arg = {'a': 'a', 'b': 'b'}
        self.assertTrue(in_p('a', ['a', 'b'])(arg))
        self.assertFalse(in_p('a', ['c', 'b'])(arg))
        self.assertFalse(in_p('a', [])(arg))


class FunctionsTest(b.SaharaTestCase):
    def test_copy_function(self):
        copy_f = fu.copy_function
        arg = {'a': 'a'}
        actual = copy_f()(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_append_field_function(self):
        append_field_f = fu.append_field_function
        arg = {'a': 'a'}
        actual = append_field_f('b', 'b')(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_append_fields_function(self):
        append_fields_f = fu.append_fields_function
        arg = {'a': 'a'}

        actual = append_fields_f({'b': 'b', 'c': 'c'})(arg)
        expected = {'a': 'a', 'b': 'b', 'c': 'c'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

        actual = append_fields_f({'b': 'b'})(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

        actual = append_fields_f({})(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_get_values_pair_function(self):
        get_values_pair_f = fu.get_values_pair_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_values_pair_f('a', 'b')(arg)
        expected = ('a', 'b')
        self.assertEqual(expected, actual)

    def test_get_field_function(self):
        get_field_f = fu.get_field_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_field_f('a')(arg)
        expected = ('a', 'a')
        self.assertEqual(expected, actual)

    def test_get_fields_function(self):
        get_fields_f = fu.get_fields_function
        arg = {'a': 'a', 'b': 'b'}

        actual = get_fields_f(['a', 'b'])(arg)
        expected = [('a', 'a'), ('b', 'b')]
        self.assertEqual(expected, actual)

        actual = get_fields_f(['a'])(arg)
        expected = [('a', 'a')]
        self.assertEqual(expected, actual)

    def test_extract_fields_function(self):
        extract_fields_f = fu.extract_fields_function
        arg = {'a': 'a', 'b': 'b'}

        actual = extract_fields_f(['a', 'b'])(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)

        actual = extract_fields_f(['a'])(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)

    def test_get_value_function(self):
        get_value_f = fu.get_value_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_value_f('a')(arg)
        expected = 'a'
        self.assertEqual(expected, actual)

    def test_set_default_value_function(self):
        set_default_value_f = fu.set_default_value_function
        arg = {'a': 'a'}

        actual = set_default_value_f('b', 'b')(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

        actual = set_default_value_f('a', 'b')(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_set_default_values_function(self):
        set_default_values_f = fu.set_default_values_function
        arg = {'a': 'a'}

        actual = set_default_values_f({'a': 'b', 'c': 'c'})(arg)
        expected = {'a': 'a', 'c': 'c'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

        actual = set_default_values_f({'b': 'b'})(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

        actual = set_default_values_f({})(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_values_pair_to_dict_function(self):
        values_pair_to_dict_f = fu.values_pair_to_dict_function
        arg = ('a', 'b')
        actual = values_pair_to_dict_f('a', 'b')(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
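# The tests above pin down the behaviour of the predicate combinators in
# sahara.plugins.mapr.util.func_utils without showing their implementation.
# Below is a minimal sketch of what not_predicate / and_predicate /
# or_predicate could look like in order to satisfy those assertions.  It is
# an illustration only, not the actual Sahara code, and the sketch_* names
# are hypothetical.
def sketch_not_predicate(predicate):
    return lambda arg: not predicate(arg)


def sketch_and_predicate(*predicates):
    return lambda arg: all(p(arg) for p in predicates)


def sketch_or_predicate(*predicates):
    return lambda arg: any(p(arg) for p in predicates)


if __name__ == '__main__':
    true_p = lambda _: True
    false_p = lambda _: False
    assert sketch_and_predicate(true_p, true_p)(None)
    assert not sketch_and_predicate(true_p, false_p)(None)
    assert sketch_or_predicate(false_p, true_p)(None)
    assert not sketch_not_predicate(true_p)(None)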
from docker.errors import APIError from cattle.plugins.docker import docker_client # TODO cattle.plugins.load_plugins() somehow make cattle.plugin.* modules # unavailable, importing it first import cattle.plugins.docker # NOQA from cattle.plugins.docker.network.setup import NetworkSetup from cattle.plugins.host_info.main import HostInfo from .common_fixtures import * # NOQA import pytest import time from cattle import CONFIG_OVERRIDE, Config if_docker = pytest.mark.skipif('os.environ.get("DOCKER_TEST") == "false"', reason='DOCKER_TEST is not set') CONFIG_OVERRIDE['DOCKER_HOST_IP'] = '1.2.3.4' def _delete_container(name): client = docker_client() for c in client.containers(all=True): for container_name in c['Names']: if name == container_name: try: client.kill(c) except: pass client.remove_container(c) def _get_container(name): client = docker_client() for c in client.containers(all=True): for container_name in c['Names']: if name == container_name: return c return None @if_docker def test_image_list(): c = docker_client() images = c.images(all=True) if len(images) == 0: c.pull('busybox') images = c.images(all=True) assert 'Id' in images[0] assert 'ID' not in images[0] @if_docker def test_image_activate(agent, responses): try: docker_client().remove_image('ibuildthecloud/helloworld:latest') except APIError: pass def post(req, resp): image_data = resp['data']['imageStoragePoolMap']['+data'] del image_data['dockerImage']['VirtualSize'] event_test(agent, 'docker/image_activate', post_func=post) @if_docker def test_volume_activate(agent, responses): event_test(agent, 'docker/volume_activate') @if_docker def test_volume_deactivate(agent, responses): event_test(agent, 'docker/volume_deactivate') @if_docker def test_instance_activate_need_pull_image(agent, responses): try: docker_client().remove_image('ibuildthecloud/helloworld:latest') except APIError: pass test_instance_only_activate(agent, responses) @if_docker def test_instance_only_activate(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] for nic in instance['nics']: nic['macAddress'] = '' def post(req, resp): instance_activate_common_validation(resp) event_test(agent, 'docker/instance_activate', pre_func=pre, post_func=post) @if_docker def test_instance_activate_no_mac_address(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] for nic in instance['nics']: nic['macAddress'] = '' def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] mac_received = docker_inspect['Config']['MacAddress'] mac_nic_received = docker_inspect['NetworkSettings']['MacAddress'] assert mac_received == '' assert mac_nic_received is not None instance_activate_common_validation(resp) event_test(agent, 'docker/instance_activate', pre_func=pre, post_func=post) @if_docker def test_instance_activate_mac_address(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] mac_received = docker_inspect['Config']['MacAddress'] mac_nic_received = docker_inspect['NetworkSettings']['MacAddress'] assert mac_nic_received == '02:03:04:05:06:07' assert mac_received == '02:03:04:05:06:07' instance_activate_common_validation(resp) event_test(agent, 
'docker/instance_activate', post_func=post) def test_multiple_nics_pick_mac(): instance = { 'nics': [ { 'macAddress': '02:03:04:05:06:07', 'deviceNumber': 0 }, { 'macAddress': '02:03:04:05:06:09', 'deviceNumber': 1 } ] } instance = JsonObject(instance) config = {'test': 'Nothing'} NetworkSetup().before_start(instance, None, config, None) assert config['mac_address'] == '02:03:04:05:06:07' @if_docker def test_instance_activate_ports(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] del instance_data['dockerInspect'] docker_container = instance_data['dockerContainer'] fields = instance_data['+fields'] del docker_container['Created'] del docker_container['Id'] del docker_container['Status'] del fields['dockerIp'] assert len(docker_container['Ports']) == 1 assert docker_container['Ports'][0]['PrivatePort'] == 8080 assert docker_container['Ports'][0]['Type'] == 'tcp' event_test(agent, 'docker/instance_activate_ports', post_func=post) @if_docker def test_instance_activate_links(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): id = resp['data']['instanceHostMap']['instance'] id = id['+data']['dockerContainer']['Id'] inspect = docker_client().inspect_container(id) instance_activate_common_validation(resp) env = inspect['Config']['Env'] assert 'MYSQL_NAME=/cattle/mysql' in env assert 'MYSQL_PORT=udp://127.0.0.2:12346' in env assert 'MYSQL_PORT_3307_UDP=udp://127.0.0.2:12346' in env assert 'MYSQL_PORT_3307_UDP_ADDR=127.0.0.2' in env assert 'MYSQL_PORT_3307_UDP_PORT=12346' in env assert 'MYSQL_PORT_3307_UDP_PROTO=udp' in env assert 'MYSQL_PORT_3306_TCP=tcp://127.0.0.1:12345' in env assert 'MYSQL_PORT_3306_TCP_ADDR=127.0.0.1' in env assert 'MYSQL_PORT_3306_TCP_PORT=12345' in env assert 'MYSQL_PORT_3306_TCP_PROTO=tcp' in env assert 'REDIS_NAME=/cattle/redis' in env assert 'REDIS_PORT=udp://127.0.0.1:23456' in env assert 'REDIS_PORT_26_UDP=udp://127.0.0.1:23456' in env assert 'REDIS_PORT_26_UDP_ADDR=127.0.0.1' in env assert 'REDIS_PORT_26_UDP_PORT=23456' in env assert 'REDIS_PORT_26_UDP_PROTO=udp' in env event_test(agent, 'docker/instance_activate_links', post_func=post) @if_docker def test_instance_activate_links_no_service(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') _delete_container('/target_redis') _delete_container('/target_mysql') client = docker_client() c = client.create_container('ibuildthecloud/helloworld', ports=['3307/udp', '3306/tcp'], name='target_mysql') client.start(c, port_bindings={ '3307/udp': ('127.0.0.2', 12346), '3306/tcp': ('127.0.0.2', 12345) }) c = client.create_container('ibuildthecloud/helloworld', name='target_redis') client.start(c) def post(req, resp): id = resp['data']['instanceHostMap']['instance'] id = id['+data']['dockerContainer']['Id'] inspect = docker_client().inspect_container(id) instance_activate_common_validation(resp) assert set( ['/target_mysql:/c861f990-4472-4fa1-960f-65171b544c28/mysql', '/target_redis:/c861f990-4472-4fa1-960f-65171b544c28/' 'redis']) == set(inspect['HostConfig']['Links']) event_test(agent, 'docker/instance_activate_links_no_service', post_func=post) @if_docker def test_instance_activate_cpu_set(agent, responses): def pre(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['cpuSet'] = '0,1' def preNull(req): 
_delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['cpuSet'] = None def preEmpty(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['cpuSet'] = '' def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Cpuset'] == '0,1' container_field_test_boiler_plate(resp) def postNull(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Cpuset'] == '' container_field_test_boiler_plate(resp) def postEmpty(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Cpuset'] == '' container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) event_test(agent, schema, pre_func=preNull, post_func=postNull) event_test(agent, schema, pre_func=preEmpty, post_func=postEmpty) @if_docker def test_instance_activate_memory_swap(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['memory'] = 8000000 instance['data']['fields']['memorySwap'] = 16000000 def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['MemorySwap'] == 16000000 assert docker_inspect['Config']['Memory'] == 8000000 container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_entrypoint(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['entryPoint'] = ["./sleep.sh"] def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Entrypoint'] == ["./sleep.sh"] docker_container = instance_data['dockerContainer'] docker_container['Command'] = "/sleep.sh" container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_memory(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['memory'] = 8000000 def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Memory'] == 8000000 container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_tty(agent, responses): def preFalse(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['tty'] = False def pre(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] 
instance['data']['fields']['tty'] = True def postFalse(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert not docker_inspect['Config']['Tty'] container_field_test_boiler_plate(resp) def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Tty'] container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) event_test(agent, schema, pre_func=preFalse, post_func=postFalse) @if_docker def test_instance_activate_stdinOpen(agent, responses): def preTrueDetach(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['stdinOpen'] = True instance['data']['fields']['detach'] = True def preFalse(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['stdinOpen'] = False instance['data']['fields']['detach'] = False def pre(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['stdinOpen'] = True instance['data']['fields']['detach'] = False def postTrueDetach(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert not docker_inspect['Config']['StdinOnce'] assert docker_inspect['Config']['OpenStdin'] assert not docker_inspect['Config']['AttachStdin'] container_field_test_boiler_plate(resp) def postFalse(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert not docker_inspect['Config']['StdinOnce'] assert not docker_inspect['Config']['OpenStdin'] assert not docker_inspect['Config']['AttachStdin'] container_field_test_boiler_plate(resp) def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['StdinOnce'] assert docker_inspect['Config']['OpenStdin'] assert docker_inspect['Config']['AttachStdin'] container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) event_test(agent, schema, pre_func=preFalse, post_func=postFalse) event_test(agent, schema, pre_func=preTrueDetach, post_func=postTrueDetach) @if_docker def test_instance_activate_lxc_conf(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') expectedLxcConf = {"lxc.network.type": "veth"} def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['lxcConf'] = expectedLxcConf def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] for conf in docker_inspect['HostConfig']['LxcConf']: assert expectedLxcConf[conf['Key']] == conf['Value'] container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_domainname(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['domainName'] = "rancher.io" def post(req, 
resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['Domainname'] == "rancher.io" container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_devices(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') input_devices = ['/dev/null:/dev/xnull', '/dev/random:/dev/xrandom:rw'] expected_devices = {} for input_device in input_devices: parts_of_device = input_device.split(':') key = parts_of_device[0] expected_devices[key] = { "PathOnHost": parts_of_device[0], "PathInContainer": parts_of_device[1] } if len(parts_of_device) == 3: expected_devices[key]["CgroupPermissions"] = parts_of_device[2] else: expected_devices[key]["CgroupPermissions"] = "rwm" def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['devices'] = input_devices def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] actual_devices = docker_inspect['HostConfig']['Devices'] assert len(expected_devices) == len(actual_devices) for act_dvc in actual_devices: exp_dvc = expected_devices[act_dvc['PathOnHost']] assert exp_dvc['PathOnHost'] == act_dvc['PathOnHost'] assert exp_dvc['PathInContainer'] == act_dvc['PathInContainer'] assert exp_dvc['CgroupPermissions'] == act_dvc['CgroupPermissions'] container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_dns(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['dns'] = ["1.2.3.4", "8.8.8.8"] instance['data']['fields']['dnsSearch'] = ["5.6.7.8", "7.7.7.7"] def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] actual_dns = docker_inspect['HostConfig']['Dns'] actual_dns_search = docker_inspect['HostConfig']['DnsSearch'] assert set(actual_dns) == set(["8.8.8.8", "1.2.3.4"]) assert set(actual_dns_search) == set(["7.7.7.7", "5.6.7.8"]) container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_caps(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['capAdd'] = ["MKNOD", "SYS_ADMIN"] instance['data']['fields']['capDrop'] = ["MKNOD", "SYS_ADMIN"] def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] set_actual_cap_add = set(docker_inspect['HostConfig']['CapAdd']) set_expected_cap_add = set(["MKNOD", "SYS_ADMIN"]) assert set_actual_cap_add == set_expected_cap_add set_actual_cap_drop = set(docker_inspect['HostConfig']['CapDrop']) set_expected_cap_drop = set(["MKNOD", "SYS_ADMIN"]) assert set_actual_cap_drop == set_expected_cap_drop container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_privileged(agent, responses): def preTrue(req): 
_delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['privileged'] = True def preFalse(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['privileged'] = False def postTrue(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') docker_inspect = instance_data['dockerInspect'] assert docker_inspect['HostConfig']['Privileged'] container_field_test_boiler_plate(resp) def postFalse(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert not docker_inspect['HostConfig']['Privileged'] container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=preTrue, post_func=postTrue) event_test(agent, schema, pre_func=preFalse, post_func=postFalse) @if_docker def test_instance_restart_policy(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') expected_restart_pol_1 = {"maximumRetryCount": 0, "name": "always"} expected_restart_pol_2 = {"name": "on-failure", "maximumRetryCount": 2, } expected_restart_pol_3 = {"name": "always"} def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['restartPolicy'] = expected_restart_pol_1 def pre_failure_policy(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['restartPolicy'] = expected_restart_pol_2 def pre_name_policy(req): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['restartPolicy'] = expected_restart_pol_3 def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] act_restart_pol = docker_inspect['HostConfig']['RestartPolicy'] assert act_restart_pol['Name'] == expected_restart_pol_1['name'] assert act_restart_pol['MaximumRetryCount'] == expected_restart_pol_1[ 'maximumRetryCount'] container_field_test_boiler_plate(resp) def post_failure_policy(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] act_restart_pol = docker_inspect['HostConfig']['RestartPolicy'] assert act_restart_pol['Name'] == expected_restart_pol_2['name'] assert act_restart_pol['MaximumRetryCount'] == expected_restart_pol_2[ 'maximumRetryCount'] container_field_test_boiler_plate(resp) def post_name_policy(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] act_restart_pol = docker_inspect['HostConfig']['RestartPolicy'] assert act_restart_pol['Name'] == expected_restart_pol_3['name'] container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) event_test(agent, schema, pre_func=pre_failure_policy, post_func=post_failure_policy) event_test(agent, schema, pre_func=pre_name_policy, post_func=post_name_policy) @if_docker def test_instance_activate_cpu_shares(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def pre(req): instance = req['data']['instanceHostMap']['instance'] instance['data']['fields']['cpuShares'] = 400 def post(req, resp): instance_data = 
resp['data']['instanceHostMap']['instance']['+data'] docker_inspect = instance_data['dockerInspect'] assert docker_inspect['Config']['CpuShares'] == 400 container_field_test_boiler_plate(resp) schema = 'docker/instance_activate_fields' event_test(agent, schema, pre_func=pre, post_func=post) @if_docker def test_instance_activate_ipsec(agent, responses): _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): instance_activate_common_validation(resp) event_test(agent, 'docker/instance_activate_ipsec', post_func=post) @if_docker def test_instance_activate_agent_instance_localhost(agent, responses): CONFIG_OVERRIDE['CONFIG_URL'] = 'https://localhost:1234/a/path' _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): id = resp['data']['instanceHostMap']['instance'] id = id['+data']['dockerContainer']['Id'] inspect = docker_client().inspect_container(id) instance_activate_common_validation(resp) port = Config.api_proxy_listen_port() assert 'CATTLE_CONFIG_URL_SCHEME=https' in inspect['Config']['Env'] assert 'CATTLE_CONFIG_URL_PATH=/a/path' in inspect['Config']['Env'] assert 'CATTLE_CONFIG_URL_PORT={0}'.format(port) in \ inspect['Config']['Env'] event_test(agent, 'docker/instance_activate_agent_instance', post_func=post) @if_docker def test_instance_activate_agent_instance(agent, responses): CONFIG_OVERRIDE['CONFIG_URL'] = 'https://something.fake:1234/a/path' _delete_container('/c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): id = resp['data']['instanceHostMap']['instance'] id = id['+data']['dockerContainer']['Id'] inspect = docker_client().inspect_container(id) instance_activate_common_validation(resp) port = Config.api_proxy_listen_port() assert 'CATTLE_CONFIG_URL={0}'.format(Config.config_url()) in \ inspect['Config']['Env'] assert 'CATTLE_CONFIG_URL_SCHEME=https' not in inspect['Config']['Env'] assert 'CATTLE_CONFIG_URL_PATH=/a/path' not in inspect['Config']['Env'] assert 'CATTLE_CONFIG_URL_PORT={0}'.format(port) not in \ inspect['Config']['Env'] assert 'ENV1=value1' in inspect['Config']['Env'] event_test(agent, 'docker/instance_activate_agent_instance', post_func=post) def _sort_ports(docker_container): docker_container['Ports'] = sorted(docker_container['Ports'], key=lambda x: 1-x['PrivatePort']) return docker_container @if_docker def test_instance_activate_volumes(agent, responses): _delete_container('/c-c861f990-4472-4fa1-960f-65171b544c28') _delete_container('/target_volumes_from') client = docker_client() c = client.create_container('ibuildthecloud/helloworld', volumes=['/volumes_from_path'], name='target_volumes_from') client.start(c) def post(req, resp): instance_data = resp['data']['instanceHostMap']['instance']['+data'] inspect = instance_data['dockerInspect'] assert inspect['Volumes']['/host/proc'] == '/proc' assert inspect['Volumes']['/host/sys'] == '/sys' assert inspect['Volumes']['/random'] is not None assert inspect['Volumes']['/volumes_from_path'] is not None assert len(inspect['Volumes']) == 4 assert inspect['VolumesRW'] == { '/host/proc': True, '/host/sys': False, '/random': True, '/volumes_from_path': True, } assert set(['/sys:/host/sys:ro', '/proc:/host/proc:rw']) == set( inspect['HostConfig']['Binds']) instance_activate_common_validation(resp) event_test(agent, 'docker/instance_activate_volumes', post_func=post) @if_docker def test_instance_activate_null_command(agent, responses): _delete_container('/c-c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): instance_activate_common_validation(resp) 
event_test(agent, 'docker/instance_activate_command_null', post_func=post) @if_docker def test_instance_activate_command(agent, responses): _delete_container('/c-c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): instance_activate_common_validation(resp) event_test(agent, 'docker/instance_activate_command', post_func=post) @if_docker def test_instance_activate_command_args(agent, responses): _delete_container('/ca-c861f990-4472-4fa1-960f-65171b544c28') def post(req, resp): instance_activate_common_validation(resp) event_test(agent, 'docker/instance_activate_command_args', post_func=post) @if_docker def test_instance_deactivate(agent, responses): test_instance_only_activate(agent, responses) def post(req, resp): container_field_test_boiler_plate(resp) start = time.time() event_test(agent, 'docker/instance_deactivate', post_func=post) end = time.time() assert end - start < 1 def pre(req): req['data']['processData']['timeout'] = 1 test_instance_only_activate(agent, responses) start = time.time() event_test(agent, 'docker/instance_deactivate', pre_func=pre, post_func=post) end = time.time() assert end - start > 1 def ping_post_process(req, resp): hostname = Config.hostname() pool_name = hostname + ' Storage Pool' resources = resp['data']['resources'] uuid = 'c861f990-4472-4fa1-960f-65171b544c28' instances = filter(lambda x: x['type'] == 'instance' and x['uuid'] == uuid, resources) assert len(instances) == 1 resources = filter(lambda x: x.get('kind') == 'docker', resources) resources.append(instances[0]) resp['data']['resources'] = resources assert resp['data']['resources'][0]['name'] == hostname assert resp['data']['resources'][1]['name'] == pool_name resp['data']['resources'][0]['name'] = 'localhost' resp['data']['resources'][1]['name'] = 'localhost Storage Pool' @if_docker def test_ping(agent, responses, mocker): mocker.patch.object(HostInfo, 'collect_data', return_value=json_data('docker/host_info_resp')) test_instance_only_activate(agent, responses) CONFIG_OVERRIDE['DOCKER_UUID'] = 'testuuid' CONFIG_OVERRIDE['PHYSICAL_HOST_UUID'] = 'hostuuid' event_test(agent, 'docker/ping', post_func=ping_post_process) @if_docker def test_ping_stat_exception(agent, responses, mocker): mocker.patch.object(HostInfo, 'collect_data', side_effect=ValueError('Bad Value Found')) test_instance_only_activate(agent, responses) CONFIG_OVERRIDE['DOCKER_UUID'] = 'testuuid' CONFIG_OVERRIDE['PHYSICAL_HOST_UUID'] = 'hostuuid' event_test(agent, 'docker/ping_stat_exception', post_func=ping_post_process) @if_docker def test_volume_purge(agent, responses): _delete_container('/c-c861f990-4472-4fa1-960f-65171b544c28') _delete_container('/target_volumes_from') client = docker_client() c = client.create_container('ibuildthecloud/helloworld', volumes=['/volumes_from_path'], name='target_volumes_from') client.start(c) # TODO Figure out a better way to test this. Because purging a volume # means removing it from disk, we run into trouble testing when # boot2docker is in the picture because the locally running agent cannot # see inside the b2d vm. We do currently test this functionality fully # in the integration test suite. 
    event_test(agent, 'docker/volume_purge')


def container_field_test_boiler_plate(resp):
    instance_data = resp['data']['instanceHostMap']['instance']['+data']
    del instance_data['dockerInspect']
    docker_container = instance_data['dockerContainer']
    fields = instance_data['+fields']
    del docker_container['Created']
    del docker_container['Id']
    del docker_container['Status']
    del fields['dockerIp']
    docker_container = _sort_ports(docker_container)


def instance_activate_common_validation(resp):
    container_field_test_boiler_plate(resp)
    docker_container = resp['data']['instanceHostMap']['instance']
    docker_container = docker_container['+data']['dockerContainer']
    fields = resp['data']['instanceHostMap']['instance']['+data']['+fields']
    del docker_container['Ports'][0]['PublicPort']
    del docker_container['Ports'][1]['PublicPort']
    assert fields['dockerPorts']['8080/tcp'] is not None
    assert fields['dockerPorts']['12201/udp'] is not None
    fields['dockerPorts']['8080/tcp'] = '1234'
    fields['dockerPorts']['12201/udp'] = '5678'


@if_docker
def test_instance_activate_ipsec_network_agent(agent, responses):
    _delete_container('/c861f990-4472-4fa1-960f-65171b544c28')

    def post(req, resp):
        instance_activate_common_validation(resp)

    event_test(agent, 'docker/instance_activate_ipsec_network_agent',
               post_func=post)


@if_docker
def test_instance_activate_ipsec_lb_agent(agent, responses):
    _delete_container('/c861f990-4472-4fa1-960f-65171b544c28')

    def post(req, resp):
        instance_activate_common_validation(resp)

    event_test(agent, 'docker/instance_activate_ipsec_lb_agent',
               post_func=post)
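# test_instance_activate_devices earlier in this file derives its expected
# Docker device mappings from strings of the form
# '<host path>:<container path>[:<permissions>]', defaulting the cgroup
# permissions to 'rwm'.  The helper below is a small standalone restatement
# of that parsing logic for illustration; the name `parse_device_spec` is
# hypothetical and is not part of the agent code.
def parse_device_spec(spec):
    parts = spec.split(':')
    return {
        'PathOnHost': parts[0],
        'PathInContainer': parts[1],
        'CgroupPermissions': parts[2] if len(parts) == 3 else 'rwm',
    }


if __name__ == '__main__':
    assert parse_device_spec('/dev/null:/dev/xnull') == {
        'PathOnHost': '/dev/null',
        'PathInContainer': '/dev/xnull',
        'CgroupPermissions': 'rwm',
    }
    assert parse_device_spec('/dev/random:/dev/xrandom:rw') == {
        'PathOnHost': '/dev/random',
        'PathInContainer': '/dev/xrandom',
        'CgroupPermissions': 'rw',
    }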
#!/usr/bin/env python # # Copyright 2009 VMware, Inc. # Copyright 2014 Intel Corporation # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sub license, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice (including the # next paragraph) shall be included in all copies or substantial portions # of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. # IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR # ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. VOID = 'x' UNSIGNED = 'u' SIGNED = 's' FLOAT = 'f' ARRAY = 'array' PACKED = 'packed' OTHER = 'other' RGB = 'rgb' SRGB = 'srgb' YUV = 'yuv' ZS = 'zs' def is_power_of_two(x): return not bool(x & (x - 1)) VERY_LARGE = 99999999999999999999999 class Channel: """Describes a color channel.""" def __init__(self, type, norm, size): self.type = type self.norm = norm self.size = size self.sign = type in (SIGNED, FLOAT) self.name = None # Set when the channels are added to the format self.shift = -1 # Set when the channels are added to the format self.index = -1 # Set when the channels are added to the format def __str__(self): s = str(self.type) if self.norm: s += 'n' s += str(self.size) return s def __eq__(self, other): return self.type == other.type and self.norm == other.norm and self.size == other.size def max(self): """Returns the maximum representable number.""" if self.type == FLOAT: return VERY_LARGE if self.norm: return 1 if self.type == UNSIGNED: return (1 << self.size) - 1 if self.type == SIGNED: return (1 << (self.size - 1)) - 1 assert False def min(self): """Returns the minimum representable number.""" if self.type == FLOAT: return -VERY_LARGE if self.type == UNSIGNED: return 0 if self.norm: return -1 if self.type == SIGNED: return -(1 << (self.size - 1)) assert False def one(self): """Returns the value that represents 1.0f.""" if self.type == UNSIGNED: return (1 << self.size) - 1 if self.type == SIGNED: return (1 << (self.size - 1)) - 1 else: return 1 def is_power_of_two(self): """Returns true if the size of this channel is a power of two.""" return is_power_of_two(self.size) class Swizzle: """Describes a swizzle operation. A Swizzle is a mapping from one set of channels in one format to the channels in another. Each channel in the destination format is associated with one of the following constants: * SWIZZLE_X: The first channel in the source format * SWIZZLE_Y: The second channel in the source format * SWIZZLE_Z: The third channel in the source format * SWIZZLE_W: The fourth channel in the source format * SWIZZLE_ZERO: The numeric constant 0 * SWIZZLE_ONE: THe numeric constant 1 * SWIZZLE_NONE: No data available for this channel Sometimes a Swizzle is represented by a 4-character string. 
In this case, the source channels are represented by the characters "x", "y", "z", and "w"; the numeric constants are represented as "0" and "1"; and no mapping is represented by "_". For instance, the map from luminance-alpha to rgba is given by "xxxy" because each of the three rgb channels maps to the first luminance-alpha channel and the alpha channel maps to second luminance-alpha channel. The mapping from bgr to rgba is given by "zyx1" because the first three colors are reversed and alpha is always 1. """ __identity_str = 'xyzw01_' SWIZZLE_X = 0 SWIZZLE_Y = 1 SWIZZLE_Z = 2 SWIZZLE_W = 3 SWIZZLE_ZERO = 4 SWIZZLE_ONE = 5 SWIZZLE_NONE = 6 def __init__(self, swizzle): """Creates a Swizzle object from a string or array.""" if isinstance(swizzle, str): swizzle = [Swizzle.__identity_str.index(c) for c in swizzle] else: swizzle = list(swizzle) for s in swizzle: assert isinstance(s, int) and 0 <= s and s <= Swizzle.SWIZZLE_NONE assert len(swizzle) <= 4 self.__list = swizzle + [Swizzle.SWIZZLE_NONE] * (4 - len(swizzle)) assert len(self.__list) == 4 def __iter__(self): """Returns an iterator that iterates over this Swizzle. The values that the iterator produces are described by the SWIZZLE_* constants. """ return self.__list.__iter__() def __str__(self): """Returns a string representation of this Swizzle.""" return ''.join(Swizzle.__identity_str[i] for i in self.__list) def __getitem__(self, idx): """Returns the SWIZZLE_* constant for the given destination channel. Valid values for the destination channel include any of the SWIZZLE_* constants or any of the following single-character strings: "x", "y", "z", "w", "r", "g", "b", "a", "z" "s". """ if isinstance(idx, int): assert idx >= Swizzle.SWIZZLE_X and idx <= Swizzle.SWIZZLE_NONE if idx <= Swizzle.SWIZZLE_W: return self.__list.__getitem__(idx) else: return idx elif isinstance(idx, str): if idx in 'xyzw': idx = 'xyzw'.find(idx) elif idx in 'rgba': idx = 'rgba'.find(idx) elif idx in 'zs': idx = 'zs'.find(idx) else: assert False return self.__list.__getitem__(idx) else: assert False def __mul__(self, other): """Returns the composition of this Swizzle with another Swizzle. The resulting swizzle is such that, for any valid input to __getitem__, (a * b)[i] = a[b[i]]. """ assert isinstance(other, Swizzle) return Swizzle(self[x] for x in other) def inverse(self): """Returns a pseudo-inverse of this swizzle. Since swizzling isn't necisaraly a bijection, a Swizzle can never be truely inverted. However, the swizzle returned is *almost* the inverse of this swizzle in the sense that, for each i in range(3), a[a.inverse()[i]] is either i or SWIZZLE_NONE. If swizzle is just a permutation with no channels added or removed, then this function returns the actual inverse. This "pseudo-inverse" idea can be demonstrated by mapping from luminance-alpha to rgba that is given by "xxxy". To get from rgba to lumanence-alpha, we use Swizzle("xxxy").inverse() or "xw__". This maps the first component in the lumanence-alpha texture is the red component of the rgba image and the second to the alpha component, exactly as you would expect. """ rev = [Swizzle.SWIZZLE_NONE] * 4 for i in xrange(4): for j in xrange(4): if self.__list[j] == i and rev[i] == Swizzle.SWIZZLE_NONE: rev[i] = j return Swizzle(rev) class Format: """Describes a pixel format.""" def __init__(self, name, layout, block_width, block_height, channels, swizzle, colorspace): """Constructs a Format from some metadata and a list of channels. 
The channel objects must be unique to this Format and should not be re-used to construct another Format. This is because certain channel information such as shift, offset, and the channel name are set when the Format is created and are calculated based on the entire list of channels. Arguments: name -- Name of the format such as 'MESA_FORMAT_A8R8G8B8' layout -- One of 'array', 'packed' 'other', or a compressed layout block_width -- The block width if the format is compressed, 1 otherwise block_height -- The block height if the format is compressed, 1 otherwise channels -- A list of Channel objects swizzle -- A Swizzle from this format to rgba colorspace -- one of 'rgb', 'srgb', 'yuv', or 'zs' """ self.name = name self.layout = layout self.block_width = block_width self.block_height = block_height self.channels = channels assert isinstance(swizzle, Swizzle) self.swizzle = swizzle self.name = name assert colorspace in (RGB, SRGB, YUV, ZS) self.colorspace = colorspace # Name the channels chan_names = ['']*4 if self.colorspace in (RGB, SRGB): for (i, s) in enumerate(swizzle): if s < 4: chan_names[s] += 'rgba'[i] elif colorspace == ZS: for (i, s) in enumerate(swizzle): if s < 4: chan_names[s] += 'zs'[i] else: chan_names = ['x', 'y', 'z', 'w'] for c, name in zip(self.channels, chan_names): assert c.name is None if name == 'rgb': c.name = 'l' elif name == 'rgba': c.name = 'i' elif name == '': c.name = 'x' else: c.name = name # Set indices and offsets if self.layout == PACKED: shift = 0 for channel in self.channels: assert channel.shift == -1 channel.shift = shift shift += channel.size for idx, channel in enumerate(self.channels): assert channel.index == -1 channel.index = idx else: pass # Shift means nothing here def __str__(self): return self.name def short_name(self): """Returns a short name for a format. The short name should be suitable to be used as suffix in function names. """ name = self.name if name.startswith('MESA_FORMAT_'): name = name[len('MESA_FORMAT_'):] name = name.lower() return name def block_size(self): """Returns the block size (in bits) of the format.""" size = 0 for channel in self.channels: size += channel.size return size def num_channels(self): """Returns the number of channels in the format.""" nr_channels = 0 for channel in self.channels: if channel.size: nr_channels += 1 return nr_channels def array_element(self): """Returns a non-void channel if this format is an array, otherwise None. If the returned channel is not None, then this format can be considered to be an array of num_channels() channels identical to the returned channel. """ if self.layout == ARRAY: return self.channels[0] elif self.layout == PACKED: ref_channel = self.channels[0] if ref_channel.type == VOID: ref_channel = self.channels[1] for channel in self.channels: if channel.size == 0 or channel.type == VOID: continue if channel.size != ref_channel.size or channel.size % 8 != 0: return None if channel.type != ref_channel.type: return None if channel.norm != ref_channel.norm: return None return ref_channel else: return None def is_array(self): """Returns true if this format can be considered an array format. This function will return true if self.layout == 'array'. However, some formats, such as MESA_FORMAT_A8G8B8R8, can be considered as array formats even though they are technically packed. 
""" return self.array_element() != None def is_compressed(self): """Returns true if this is a compressed format.""" return self.block_width != 1 or self.block_height != 1 def is_int(self): """Returns true if this format is an integer format. See also: is_norm() """ if self.layout not in (ARRAY, PACKED): return False for channel in self.channels: if channel.type not in (VOID, UNSIGNED, SIGNED): return False return True def is_float(self): """Returns true if this format is an floating-point format.""" if self.layout not in (ARRAY, PACKED): return False for channel in self.channels: if channel.type not in (VOID, FLOAT): return False return True def channel_type(self): """Returns the type of the channels in this format.""" _type = VOID for c in self.channels: if c.type == VOID: continue if _type == VOID: _type = c.type assert c.type == _type return _type def channel_size(self): """Returns the size (in bits) of the channels in this format. This function should only be called if all of the channels have the same size. This is always the case if is_array() returns true. """ size = None for c in self.channels: if c.type == VOID: continue if size is None: size = c.size assert c.size == size return size def max_channel_size(self): """Returns the size of the largest channel.""" size = 0 for c in self.channels: if c.type == VOID: continue size = max(size, c.size) return size def is_normalized(self): """Returns true if this format is normalized. While only integer formats can be normalized, not all integer formats are normalized. Normalized integer formats are those where the integer value is re-interpreted as a fixed point value in the range [0, 1]. """ norm = None for c in self.channels: if c.type == VOID: continue if norm is None: norm = c.norm assert c.norm == norm return norm def has_channel(self, name): """Returns true if this format has the given channel.""" if self.is_compressed(): # Compressed formats are a bit tricky because the list of channels # contains a single channel of type void. Since we don't have any # channel information there, we pull it from the swizzle. if str(self.swizzle) == 'xxxx': return name == 'i' elif str(self.swizzle)[0:3] in ('xxx', 'yyy'): if name == 'l': return True elif name == 'a': return self.swizzle['a'] <= Swizzle.SWIZZLE_W else: return False elif name in 'rgba': return self.swizzle[name] <= Swizzle.SWIZZLE_W else: return False else: for channel in self.channels: if channel.name == name: return True return False def get_channel(self, name): """Returns the channel with the given name if it exists.""" for channel in self.channels: if channel.name == name: return channel return None def _parse_channels(fields, layout, colorspace, swizzle): channels = [] for field in fields: if not field: continue type = field[0] if field[0] else 'x' if field[1] == 'n': norm = True size = int(field[2:]) else: norm = False size = int(field[1:]) channel = Channel(type, norm, size) channels.append(channel) return channels def parse(filename): """Parse a format descrition in CSV format. 
    This function parses the given CSV file and returns an iterable of
    Format objects.
    """
    with open(filename) as stream:
        for line in stream:
            try:
                comment = line.index('#')
            except ValueError:
                pass
            else:
                line = line[:comment]

            line = line.strip()
            if not line:
                continue

            fields = [field.strip() for field in line.split(',')]

            name = fields[0]
            layout = fields[1]
            block_width = int(fields[2])
            block_height = int(fields[3])
            colorspace = fields[9]
            swizzle = Swizzle(fields[8])
            channels = _parse_channels(fields[4:8], layout, colorspace,
                                       swizzle)

            yield Format(name, layout, block_width, block_height, channels,
                         swizzle, colorspace)
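# The Swizzle docstrings above document two behaviours: composition, where
# (a * b)[i] == a[b[i]], and the pseudo-inverse, for which the
# luminance-alpha to rgba map "xxxy" inverts to "xw__".  The checks below
# simply exercise those documented examples.  They are illustrative only and
# assume Python 2, since Swizzle.inverse() as written relies on xrange.
if __name__ == '__main__':
    la_to_rgba = Swizzle('xxxy')
    assert str(la_to_rgba.inverse()) == 'xw__'

    bgr_to_rgba = Swizzle('zyx1')
    identity = Swizzle('xyzw')
    # Composing with the identity swizzle leaves the mapping unchanged.
    assert str(identity * bgr_to_rgba) == 'zyx1'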
"""The Qt MainWindow for the QtConsole This is a tabbed pseudo-terminal of Jupyter sessions, with a menu bar for common actions. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import sys import webbrowser from threading import Thread from jupyter_core.paths import jupyter_runtime_dir from pygments.styles import get_all_styles from qtpy import QtGui, QtCore, QtWidgets from qtconsole import styles from qtconsole.jupyter_widget import JupyterWidget from qtconsole.usage import gui_reference def background(f): """call a function in a simple thread, to prevent blocking""" t = Thread(target=f) t.start() return t class MainWindow(QtWidgets.QMainWindow): #--------------------------------------------------------------------------- # 'object' interface #--------------------------------------------------------------------------- def __init__(self, app, confirm_exit=True, new_frontend_factory=None, slave_frontend_factory=None, connection_frontend_factory=None, ): """ Create a tabbed MainWindow for managing FrontendWidgets Parameters ---------- app : reference to QApplication parent confirm_exit : bool, optional Whether we should prompt on close of tabs new_frontend_factory : callable A callable that returns a new JupyterWidget instance, attached to its own running kernel. slave_frontend_factory : callable A callable that takes an existing JupyterWidget, and returns a new JupyterWidget instance, attached to the same kernel. """ super(MainWindow, self).__init__() self._kernel_counter = 0 self._external_kernel_counter = 0 self._app = app self.confirm_exit = confirm_exit self.new_frontend_factory = new_frontend_factory self.slave_frontend_factory = slave_frontend_factory self.connection_frontend_factory = connection_frontend_factory self.tab_widget = QtWidgets.QTabWidget(self) self.tab_widget.setDocumentMode(True) self.tab_widget.setTabsClosable(True) self.tab_widget.tabCloseRequested[int].connect(self.close_tab) self.setCentralWidget(self.tab_widget) # hide tab bar at first, since we have no tabs: self.tab_widget.tabBar().setVisible(False) # prevent focus in tab bar self.tab_widget.setFocusPolicy(QtCore.Qt.NoFocus) def update_tab_bar_visibility(self): """ update visibility of the tabBar depending of the number of tab 0 or 1 tab, tabBar hidden 2+ tabs, tabBar visible send a self.close if number of tab ==0 need to be called explicitly, or be connected to tabInserted/tabRemoved """ if self.tab_widget.count() <= 1: self.tab_widget.tabBar().setVisible(False) else: self.tab_widget.tabBar().setVisible(True) if self.tab_widget.count()==0 : self.close() @property def next_kernel_id(self): """constantly increasing counter for kernel IDs""" c = self._kernel_counter self._kernel_counter += 1 return c @property def next_external_kernel_id(self): """constantly increasing counter for external kernel IDs""" c = self._external_kernel_counter self._external_kernel_counter += 1 return c @property def active_frontend(self): return self.tab_widget.currentWidget() def create_tab_with_new_frontend(self): """create a new frontend and attach it to a new tab""" widget = self.new_frontend_factory() self.add_tab_with_frontend(widget) def set_window_title(self): """Set the title of the console window""" old_title = self.windowTitle() title, ok = QtWidgets.QInputDialog.getText(self, "Rename Window", "New title:", text=old_title) if ok: self.setWindowTitle(title) def create_tab_with_existing_kernel(self): """create a new frontend attached to an external kernel in a new tab""" 
connection_file, file_type = QtWidgets.QFileDialog.getOpenFileName(self, "Connect to Existing Kernel", jupyter_runtime_dir(), "Connection file (*.json)") if not connection_file: return widget = self.connection_frontend_factory(connection_file) name = "external {}".format(self.next_external_kernel_id) self.add_tab_with_frontend(widget, name=name) def create_tab_with_current_kernel(self): """create a new frontend attached to the same kernel as the current tab""" current_widget = self.tab_widget.currentWidget() current_widget_index = self.tab_widget.indexOf(current_widget) current_widget_name = self.tab_widget.tabText(current_widget_index) widget = self.slave_frontend_factory(current_widget) if 'slave' in current_widget_name: # don't keep stacking slaves name = current_widget_name else: name = '(%s) slave' % current_widget_name self.add_tab_with_frontend(widget,name=name) def set_tab_title(self): """Set the title of the current tab""" old_title = self.tab_widget.tabText(self.tab_widget.currentIndex()) title, ok = QtWidgets.QInputDialog.getText(self, "Rename Tab", "New title:", text=old_title) if ok: self.tab_widget.setTabText(self.tab_widget.currentIndex(), title) def close_tab(self,current_tab): """ Called when you need to try to close a tab. It takes the number of the tab to be closed as argument, or a reference to the widget inside this tab """ # let's be sure "tab" and "closing widget" are respectively the index # of the tab to close and a reference to the frontend to close if type(current_tab) is not int : current_tab = self.tab_widget.indexOf(current_tab) closing_widget=self.tab_widget.widget(current_tab) # when trying to be closed, widget might re-send a request to be # closed again, but will be deleted when event will be processed. So # need to check that widget still exists and skip if not. One example # of this is when 'exit' is sent in a slave tab. 'exit' will be # re-sent by this function on the master widget, which ask all slave # widgets to exit if closing_widget is None: return #get a list of all slave widgets on the same kernel. slave_tabs = self.find_slave_widgets(closing_widget) keepkernel = None #Use the prompt by default if hasattr(closing_widget,'_keep_kernel_on_exit'): #set by exit magic keepkernel = closing_widget._keep_kernel_on_exit # If signal sent by exit magic (_keep_kernel_on_exit, exist and not None) # we set local slave tabs._hidden to True to avoid prompting for kernel # restart when they get the signal. and then "forward" the 'exit' # to the main window if keepkernel is not None: for tab in slave_tabs: tab._hidden = True if closing_widget in slave_tabs: try : self.find_master_tab(closing_widget).execute('exit') except AttributeError: self.log.info("Master already closed or not local, closing only current tab") self.tab_widget.removeTab(current_tab) self.update_tab_bar_visibility() return kernel_client = closing_widget.kernel_client kernel_manager = closing_widget.kernel_manager if keepkernel is None and not closing_widget._confirm_exit: # don't prompt, just terminate the kernel if we own it # or leave it alone if we don't keepkernel = closing_widget._existing if keepkernel is None: #show prompt if kernel_client and kernel_client.channels_running: title = self.window().windowTitle() cancel = QtWidgets.QMessageBox.Cancel okay = QtWidgets.QMessageBox.Ok if closing_widget._may_close: msg = "You are closing the tab : "+'"'+self.tab_widget.tabText(current_tab)+'"' info = "Would you like to quit the Kernel and close all attached Consoles as well?" 
justthis = QtWidgets.QPushButton("&No, just this Tab", self) justthis.setShortcut('N') closeall = QtWidgets.QPushButton("&Yes, close all", self) closeall.setShortcut('Y') # allow ctrl-d ctrl-d exit, like in terminal closeall.setShortcut('Ctrl+D') box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Question, title, msg) box.setInformativeText(info) box.addButton(cancel) box.addButton(justthis, QtWidgets.QMessageBox.NoRole) box.addButton(closeall, QtWidgets.QMessageBox.YesRole) box.setDefaultButton(closeall) box.setEscapeButton(cancel) pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64))) box.setIconPixmap(pixmap) reply = box.exec_() if reply == 1: # close All for slave in slave_tabs: background(slave.kernel_client.stop_channels) self.tab_widget.removeTab(self.tab_widget.indexOf(slave)) kernel_manager.shutdown_kernel() self.tab_widget.removeTab(current_tab) background(kernel_client.stop_channels) elif reply == 0: # close Console if not closing_widget._existing: # Have kernel: don't quit, just close the tab closing_widget.execute("exit True") self.tab_widget.removeTab(current_tab) background(kernel_client.stop_channels) else: reply = QtWidgets.QMessageBox.question(self, title, "Are you sure you want to close this Console?"+ "\nThe Kernel and other Consoles will remain active.", okay|cancel, defaultButton=okay ) if reply == okay: self.tab_widget.removeTab(current_tab) elif keepkernel: #close console but leave kernel running (no prompt) self.tab_widget.removeTab(current_tab) background(kernel_client.stop_channels) else: #close console and kernel (no prompt) self.tab_widget.removeTab(current_tab) if kernel_client and kernel_client.channels_running: for slave in slave_tabs: background(slave.kernel_client.stop_channels) self.tab_widget.removeTab(self.tab_widget.indexOf(slave)) if kernel_manager: kernel_manager.shutdown_kernel() background(kernel_client.stop_channels) self.update_tab_bar_visibility() def add_tab_with_frontend(self,frontend,name=None): """ insert a tab with a given frontend in the tab bar, and give it a name """ if not name: name = 'kernel %i' % self.next_kernel_id self.tab_widget.addTab(frontend,name) self.update_tab_bar_visibility() self.make_frontend_visible(frontend) frontend.exit_requested.connect(self.close_tab) def next_tab(self): self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()+1)) def prev_tab(self): self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()-1)) def make_frontend_visible(self,frontend): widget_index=self.tab_widget.indexOf(frontend) if widget_index > 0 : self.tab_widget.setCurrentIndex(widget_index) def find_master_tab(self,tab,as_list=False): """ Try to return the frontend that owns the kernel attached to the given widget/tab. Only finds frontend owned by the current application. Selection based on port of the kernel might be inaccurate if several kernel on different ip use same port number. This function does the conversion tabNumber/widget if needed. Might return None if no master widget (non local kernel) Will crash if more than 1 masterWidget When asList set to True, always return a list of widget(s) owning the kernel. The list might be empty or containing several Widget. 
""" #convert from/to int/richIpythonWidget if needed if isinstance(tab, int): tab = self.tab_widget.widget(tab) km=tab.kernel_client #build list of all widgets widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())] # widget that are candidate to be the owner of the kernel does have all the same port of the curent widget # And should have a _may_close attribute filtered_widget_list = [ widget for widget in widget_list if widget.kernel_client.connection_file == km.connection_file and hasattr(widget,'_may_close') ] # the master widget is the one that may close the kernel master_widget= [ widget for widget in filtered_widget_list if widget._may_close] if as_list: return master_widget assert(len(master_widget)<=1 ) if len(master_widget)==0: return None return master_widget[0] def find_slave_widgets(self,tab): """return all the frontends that do not own the kernel attached to the given widget/tab. Only find frontends owned by the current application. Selection based on connection file of the kernel. This function does the conversion tabNumber/widget if needed. """ #convert from/to int/richIpythonWidget if needed if isinstance(tab, int): tab = self.tab_widget.widget(tab) km=tab.kernel_client #build list of all widgets widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())] # widget that are candidate not to be the owner of the kernel does have all the same port of the curent widget filtered_widget_list = ( widget for widget in widget_list if widget.kernel_client.connection_file == km.connection_file) # Get a list of all widget owning the same kernel and removed it from # the previous cadidate. (better using sets ?) master_widget_list = self.find_master_tab(tab, as_list=True) slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list] return slave_list # Populate the menu bar with common actions and shortcuts def add_menu_action(self, menu, action, defer_shortcut=False): """Add action to menu as well as self So that when the menu bar is invisible, its actions are still available. If defer_shortcut is True, set the shortcut context to widget-only, where it will avoid conflict with shortcuts already bound to the widgets themselves. 
""" menu.addAction(action) self.addAction(action) if defer_shortcut: action.setShortcutContext(QtCore.Qt.WidgetShortcut) def init_menu_bar(self): #create menu in the order they should appear in the menu bar self.init_file_menu() self.init_edit_menu() self.init_view_menu() self.init_kernel_menu() self.init_window_menu() self.init_help_menu() def init_file_menu(self): self.file_menu = self.menuBar().addMenu("&File") self.new_kernel_tab_act = QtWidgets.QAction("New Tab with &New kernel", self, shortcut="Ctrl+T", triggered=self.create_tab_with_new_frontend) self.add_menu_action(self.file_menu, self.new_kernel_tab_act) self.slave_kernel_tab_act = QtWidgets.QAction("New Tab with Sa&me kernel", self, shortcut="Ctrl+Shift+T", triggered=self.create_tab_with_current_kernel) self.add_menu_action(self.file_menu, self.slave_kernel_tab_act) self.existing_kernel_tab_act = QtWidgets.QAction("New Tab with &Existing kernel", self, shortcut="Alt+T", triggered=self.create_tab_with_existing_kernel) self.add_menu_action(self.file_menu, self.existing_kernel_tab_act) self.file_menu.addSeparator() self.close_action=QtWidgets.QAction("&Close Tab", self, shortcut=QtGui.QKeySequence.Close, triggered=self.close_active_frontend ) self.add_menu_action(self.file_menu, self.close_action) self.export_action=QtWidgets.QAction("&Save to HTML/XHTML", self, shortcut=QtGui.QKeySequence.Save, triggered=self.export_action_active_frontend ) self.add_menu_action(self.file_menu, self.export_action, True) self.file_menu.addSeparator() printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print) if printkey.matches("Ctrl+P") and sys.platform != 'darwin': # Only override the default if there is a collision. # Qt ctrl = cmd on OSX, so the match gets a false positive on OSX. printkey = "Ctrl+Shift+P" self.print_action = QtWidgets.QAction("&Print", self, shortcut=printkey, triggered=self.print_action_active_frontend) self.add_menu_action(self.file_menu, self.print_action, True) if sys.platform != 'darwin': # OSX always has Quit in the Application menu, only add it # to the File menu elsewhere. 
self.file_menu.addSeparator() self.quit_action = QtWidgets.QAction("&Quit", self, shortcut=QtGui.QKeySequence.Quit, triggered=self.close, ) self.add_menu_action(self.file_menu, self.quit_action) def init_edit_menu(self): self.edit_menu = self.menuBar().addMenu("&Edit") self.undo_action = QtWidgets.QAction("&Undo", self, shortcut=QtGui.QKeySequence.Undo, statusTip="Undo last action if possible", triggered=self.undo_active_frontend ) self.add_menu_action(self.edit_menu, self.undo_action) self.redo_action = QtWidgets.QAction("&Redo", self, shortcut=QtGui.QKeySequence.Redo, statusTip="Redo last action if possible", triggered=self.redo_active_frontend) self.add_menu_action(self.edit_menu, self.redo_action) self.edit_menu.addSeparator() self.cut_action = QtWidgets.QAction("&Cut", self, shortcut=QtGui.QKeySequence.Cut, triggered=self.cut_active_frontend ) self.add_menu_action(self.edit_menu, self.cut_action, True) self.copy_action = QtWidgets.QAction("&Copy", self, shortcut=QtGui.QKeySequence.Copy, triggered=self.copy_active_frontend ) self.add_menu_action(self.edit_menu, self.copy_action, True) self.copy_raw_action = QtWidgets.QAction("Copy (&Raw Text)", self, shortcut="Ctrl+Shift+C", triggered=self.copy_raw_active_frontend ) self.add_menu_action(self.edit_menu, self.copy_raw_action, True) self.paste_action = QtWidgets.QAction("&Paste", self, shortcut=QtGui.QKeySequence.Paste, triggered=self.paste_active_frontend ) self.add_menu_action(self.edit_menu, self.paste_action, True) self.edit_menu.addSeparator() selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll) if selectall.matches("Ctrl+A") and sys.platform != 'darwin': # Only override the default if there is a collision. # Qt ctrl = cmd on OSX, so the match gets a false positive on OSX. selectall = "Ctrl+Shift+A" self.select_all_action = QtWidgets.QAction("Select Cell/&All", self, shortcut=selectall, triggered=self.select_all_active_frontend ) self.add_menu_action(self.edit_menu, self.select_all_action, True) def init_view_menu(self): self.view_menu = self.menuBar().addMenu("&View") if sys.platform != 'darwin': # disable on OSX, where there is always a menu bar self.toggle_menu_bar_act = QtWidgets.QAction("Toggle &Menu Bar", self, shortcut="Ctrl+Shift+M", statusTip="Toggle visibility of menubar", triggered=self.toggle_menu_bar) self.add_menu_action(self.view_menu, self.toggle_menu_bar_act) fs_key = "Ctrl+Meta+F" if sys.platform == 'darwin' else "F11" self.full_screen_act = QtWidgets.QAction("&Full Screen", self, shortcut=fs_key, statusTip="Toggle between Fullscreen and Normal Size", triggered=self.toggleFullScreen) self.add_menu_action(self.view_menu, self.full_screen_act) self.view_menu.addSeparator() self.increase_font_size = QtWidgets.QAction("Zoom &In", self, shortcut=QtGui.QKeySequence.ZoomIn, triggered=self.increase_font_size_active_frontend ) self.add_menu_action(self.view_menu, self.increase_font_size, True) self.decrease_font_size = QtWidgets.QAction("Zoom &Out", self, shortcut=QtGui.QKeySequence.ZoomOut, triggered=self.decrease_font_size_active_frontend ) self.add_menu_action(self.view_menu, self.decrease_font_size, True) self.reset_font_size = QtWidgets.QAction("Zoom &Reset", self, shortcut="Ctrl+0", triggered=self.reset_font_size_active_frontend ) self.add_menu_action(self.view_menu, self.reset_font_size, True) self.view_menu.addSeparator() self.clear_action = QtWidgets.QAction("&Clear Screen", self, shortcut='Ctrl+L', statusTip="Clear the console", triggered=self.clear_active_frontend) self.add_menu_action(self.view_menu, 
self.clear_action) self.pager_menu = self.view_menu.addMenu("&Pager") hsplit_action = QtWidgets.QAction(".. &Horizontal Split", self, triggered=lambda: self.set_paging_active_frontend('hsplit')) vsplit_action = QtWidgets.QAction(" : &Vertical Split", self, triggered=lambda: self.set_paging_active_frontend('vsplit')) inside_action = QtWidgets.QAction(" &Inside Pager", self, triggered=lambda: self.set_paging_active_frontend('inside')) self.pager_menu.addAction(hsplit_action) self.pager_menu.addAction(vsplit_action) self.pager_menu.addAction(inside_action) available_syntax_styles = self.get_available_syntax_styles() if len(available_syntax_styles) > 0: self.syntax_style_menu = self.view_menu.addMenu("&Syntax Style") style_group = QtWidgets.QActionGroup(self) for style in available_syntax_styles: action = QtWidgets.QAction("{}".format(style), self, triggered=lambda v, syntax_style=style: self.set_syntax_style( syntax_style=syntax_style)) action.setCheckable(True) style_group.addAction(action) self.syntax_style_menu.addAction(action) if style == 'default': action.setChecked(True) self.syntax_style_menu.setDefaultAction(action) def init_kernel_menu(self): self.kernel_menu = self.menuBar().addMenu("&Kernel") # Qt on OSX maps Ctrl to Cmd, and Meta to Ctrl # keep the signal shortcuts to ctrl, rather than # platform-default like we do elsewhere. ctrl = "Meta" if sys.platform == 'darwin' else "Ctrl" self.interrupt_kernel_action = QtWidgets.QAction("&Interrupt current Kernel", self, triggered=self.interrupt_kernel_active_frontend, shortcut=ctrl+"+C", ) self.add_menu_action(self.kernel_menu, self.interrupt_kernel_action) self.restart_kernel_action = QtWidgets.QAction("&Restart current Kernel", self, triggered=self.restart_kernel_active_frontend, shortcut=ctrl+"+.", ) self.add_menu_action(self.kernel_menu, self.restart_kernel_action) self.kernel_menu.addSeparator() self.confirm_restart_kernel_action = QtWidgets.QAction("&Confirm kernel restart", self, checkable=True, checked=self.active_frontend.confirm_restart, triggered=self.toggle_confirm_restart_active_frontend ) self.add_menu_action(self.kernel_menu, self.confirm_restart_kernel_action) self.tab_widget.currentChanged.connect(self.update_restart_checkbox) def init_window_menu(self): self.window_menu = self.menuBar().addMenu("&Window") if sys.platform == 'darwin': # add min/maximize actions to OSX, which lacks default bindings. 
self.minimizeAct = QtWidgets.QAction("Mini&mize", self, shortcut="Ctrl+m", statusTip="Minimize the window/Restore Normal Size", triggered=self.toggleMinimized) # maximize is called 'Zoom' on OSX for some reason self.maximizeAct = QtWidgets.QAction("&Zoom", self, shortcut="Ctrl+Shift+M", statusTip="Maximize the window/Restore Normal Size", triggered=self.toggleMaximized) self.add_menu_action(self.window_menu, self.minimizeAct) self.add_menu_action(self.window_menu, self.maximizeAct) self.window_menu.addSeparator() prev_key = "Ctrl+Alt+Left" if sys.platform == 'darwin' else "Ctrl+PgUp" self.prev_tab_act = QtWidgets.QAction("Pre&vious Tab", self, shortcut=prev_key, statusTip="Select previous tab", triggered=self.prev_tab) self.add_menu_action(self.window_menu, self.prev_tab_act) next_key = "Ctrl+Alt+Right" if sys.platform == 'darwin' else "Ctrl+PgDown" self.next_tab_act = QtWidgets.QAction("Ne&xt Tab", self, shortcut=next_key, statusTip="Select next tab", triggered=self.next_tab) self.add_menu_action(self.window_menu, self.next_tab_act) self.rename_window_act = QtWidgets.QAction("Rename &Window", self, shortcut="Alt+R", statusTip="Rename window", triggered=self.set_window_title) self.add_menu_action(self.window_menu, self.rename_window_act) self.rename_current_tab_act = QtWidgets.QAction("&Rename Current Tab", self, shortcut="Ctrl+R", statusTip="Rename current tab", triggered=self.set_tab_title) self.add_menu_action(self.window_menu, self.rename_current_tab_act) def init_help_menu(self): # please keep the Help menu in Mac Os even if empty. It will # automatically contain a search field to search inside menus and # please keep it spelled in English, as long as Qt Doesn't support # a QAction.MenuRole like HelpMenuRole otherwise it will lose # this search field functionality self.help_menu = self.menuBar().addMenu("&Help") # Help Menu self.help_action = QtWidgets.QAction("Show &QtConsole help", self, triggered=self._show_help) self.online_help_action = QtWidgets.QAction("Open online &help", self, triggered=self._open_online_help) self.add_menu_action(self.help_menu, self.help_action) self.add_menu_action(self.help_menu, self.online_help_action) def _set_active_frontend_focus(self): # this is a hack, self.active_frontend._control seems to be # a private member. 
Unfortunately this is the only method # to set focus reliably QtCore.QTimer.singleShot(200, self.active_frontend._control.setFocus) # minimize/maximize/fullscreen actions: def toggle_menu_bar(self): menu_bar = self.menuBar() if menu_bar.isVisible(): menu_bar.setVisible(False) else: menu_bar.setVisible(True) def toggleMinimized(self): if not self.isMinimized(): self.showMinimized() else: self.showNormal() def _show_help(self): self.active_frontend._page(gui_reference) def _open_online_help(self): webbrowser.open("https://qtconsole.readthedocs.io", new=1, autoraise=True) def toggleMaximized(self): if not self.isMaximized(): self.showMaximized() else: self.showNormal() # Min/Max imizing while in full screen give a bug # when going out of full screen, at least on OSX def toggleFullScreen(self): if not self.isFullScreen(): self.showFullScreen() if sys.platform == 'darwin': self.maximizeAct.setEnabled(False) self.minimizeAct.setEnabled(False) else: self.showNormal() if sys.platform == 'darwin': self.maximizeAct.setEnabled(True) self.minimizeAct.setEnabled(True) def set_paging_active_frontend(self, paging): self.active_frontend._set_paging(paging) def get_available_syntax_styles(self): """Get a list with the syntax styles available.""" styles = list(get_all_styles()) return sorted(styles) def set_syntax_style(self, syntax_style): """Set up syntax style for the current console.""" if syntax_style=='bw': colors='nocolor' elif styles.dark_style(syntax_style): colors='linux' else: colors='lightbg' self.active_frontend.syntax_style = syntax_style style_sheet = styles.sheet_from_template(syntax_style, colors) self.active_frontend.style_sheet = style_sheet self.active_frontend._syntax_style_changed() self.active_frontend._style_sheet_changed() self.active_frontend.reset(clear=True) self.active_frontend._execute("%colors linux", True) def close_active_frontend(self): self.close_tab(self.active_frontend) def restart_kernel_active_frontend(self): self.active_frontend.request_restart_kernel() def interrupt_kernel_active_frontend(self): self.active_frontend.request_interrupt_kernel() def toggle_confirm_restart_active_frontend(self): widget = self.active_frontend widget.confirm_restart = not widget.confirm_restart self.confirm_restart_kernel_action.setChecked(widget.confirm_restart) def update_restart_checkbox(self): if self.active_frontend is None: return widget = self.active_frontend self.confirm_restart_kernel_action.setChecked(widget.confirm_restart) def clear_active_frontend(self): self.active_frontend.clear() def cut_active_frontend(self): widget = self.active_frontend if widget.can_cut(): widget.cut() def copy_active_frontend(self): widget = self.active_frontend widget.copy() def copy_raw_active_frontend(self): self.active_frontend._copy_raw_action.trigger() def paste_active_frontend(self): widget = self.active_frontend if widget.can_paste(): widget.paste() def undo_active_frontend(self): self.active_frontend.undo() def redo_active_frontend(self): self.active_frontend.redo() def print_action_active_frontend(self): self.active_frontend.print_action.trigger() def export_action_active_frontend(self): self.active_frontend.export_action.trigger() def select_all_active_frontend(self): self.active_frontend.select_all_action.trigger() def increase_font_size_active_frontend(self): self.active_frontend.increase_font_size.trigger() def decrease_font_size_active_frontend(self): self.active_frontend.decrease_font_size.trigger() def reset_font_size_active_frontend(self): 
        self.active_frontend.reset_font_size.trigger()

    #---------------------------------------------------------------------------
    # QWidget interface
    #---------------------------------------------------------------------------

    def closeEvent(self, event):
        """ Forward the close event to every tab contained by the window """
        if self.tab_widget.count() == 0:
            # no tabs, just close
            event.accept()
            return
        # Do not loop over the widget count, as it changes while tabs are closed
        title = self.window().windowTitle()
        cancel = QtWidgets.QMessageBox.Cancel
        okay = QtWidgets.QMessageBox.Ok
        accept_role = QtWidgets.QMessageBox.AcceptRole

        if self.confirm_exit:
            if self.tab_widget.count() > 1:
                msg = "Close all tabs, stop all kernels, and Quit?"
            else:
                msg = "Close console, stop kernel, and Quit?"
            info = "Kernels not started here (e.g. notebooks) will be left alone."
            closeall = QtWidgets.QPushButton("&Quit", self)
            closeall.setShortcut('Q')
            box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Question,
                                        title, msg)
            box.setInformativeText(info)
            box.addButton(cancel)
            box.addButton(closeall, QtWidgets.QMessageBox.YesRole)
            box.setDefaultButton(closeall)
            box.setEscapeButton(cancel)
            pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
            box.setIconPixmap(pixmap)
            reply = box.exec_()
        else:
            reply = okay

        if reply == cancel:
            event.ignore()
            return
        if reply == okay or reply == accept_role:
            while self.tab_widget.count() >= 1:
                # prevent further confirmations:
                widget = self.active_frontend
                widget._confirm_exit = False
                self.close_tab(widget)
            event.accept()
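# ---------------------------------------------------------------------------
# Hedged example (not part of qtconsole): a minimal, Qt-free sketch of the
# master/slave resolution strategy used by find_master_tab() and
# find_slave_widgets() above. Frontends sharing a kernel are matched by
# their kernel_client.connection_file; the "master" is the frontend that
# carries a _may_close attribute. FakeClient/FakeFrontend and
# split_master_and_slaves are hypothetical stand-ins for illustration only.
# ---------------------------------------------------------------------------

class FakeClient(object):
    def __init__(self, connection_file):
        self.connection_file = connection_file


class FakeFrontend(object):
    def __init__(self, name, connection_file, may_close=False):
        self.name = name
        self.kernel_client = FakeClient(connection_file)
        if may_close:
            self._may_close = True


def split_master_and_slaves(frontends, reference):
    """Return (master_or_None, slaves) for widgets sharing reference's kernel."""
    # candidates use the same connection file as the reference widget
    same_kernel = [w for w in frontends
                   if w.kernel_client.connection_file ==
                   reference.kernel_client.connection_file]
    # the master is the (at most one) widget allowed to close the kernel
    masters = [w for w in same_kernel if getattr(w, '_may_close', False)]
    slaves = [w for w in same_kernel if w not in masters]
    return (masters[0] if masters else None), slaves


if __name__ == '__main__':
    a = FakeFrontend('a', 'kernel-1.json', may_close=True)
    b = FakeFrontend('b', 'kernel-1.json')
    c = FakeFrontend('c', 'kernel-2.json', may_close=True)
    master, slaves = split_master_and_slaves([a, b, c], b)
    print(master.name, [w.name for w in slaves])  # -> a ['b']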
#!/usr/bin/env python import dbus import subprocess import sys import os import optparse import re from dbus.mainloop.glib import DBusGMainLoop import glib, gobject # Either pass the bluetooth address of the network access point to connect to # with the -b flag, or store it in the config file in the form: # bdaddr = 00:11:22:AA:BB:CC def_config_file = '~/.blue-tether' dhcp_clients = [ ['/sbin/dhclient', '-v', '-d'], ['/sbin/udhcpc', '-f', '-i'], ] # FUCKING BLUEZ DEVS! # In Bluez 4, this is 'org.bluez.Network' # In Bluez 5, this is 'org.bluez.Network1' # And the DBUS API means we won't find out until later if we got it wrong! # Currently relying on another Bluez 5 breakage to switch this string... # TODO: introspect which is in use instead BLUEZ_NETWORK_IFACE = 'org.bluez.Network' def process_config_file(opts, parser): opts.config = os.path.expanduser(opts.config) if not os.path.isfile(opts.config): return with open(opts.config, 'r') as f: for line in f: line = line.split('#', 1)[0].strip() if line == '': continue try: (opt, val) = map(str.strip, line.split('=', 1)) except ValueError: parser.error('Badly formatted line in condfig file: %s' % line) try: if getattr(opts, opt) == None: val = parser.get_option('--%s'%opt).convert_value(opt, val) setattr(opts, opt, val) except AttributeError: parser.error('Unknown option in config file: %s' % opt) def get_config(): def check_bdaddr(): import re if opts.bdaddr is None: parser.error( 'bdaddr must be specified with -b or in %s' % opts.config) if re.match('([0-9a-fA-F]{2}(:(?=.)|$)){6}$', opts.bdaddr) is None: parser.error('bdaddr in wrong format') parser = optparse.OptionParser() parser.add_option('-a', '--adapter', help='Which bluetooth adapter to use if more than one are available') parser.add_option('-b', '--bdaddr', help='Bluetooth address of the network access point to connect to') parser.add_option('-r', '--reconnect', type='int', metavar='SECONDS', help='Attempt to reconnect every SECONDS if the network goes down') parser.add_option('-c', '--config', default=def_config_file, help='Process this config file (%default)') (opts, args) = parser.parse_args() if len(args): parser.error('Too many arguments') process_config_file(opts, parser) check_bdaddr() return opts class dhcp_client(object): proc = None # In case of race between __init__ and __del__ def __init__(self, cmd, interface): import atexit print 'Starting DHCP client...' self.proc = subprocess.Popen(cmd + [interface], stdout=sys.stdout) atexit.register(self.cleanup) def cleanup(self): if self.proc: print 'Stopping DHCP client...' 
self.proc.kill() self.proc.wait() self.proc = None def __del__(self): print 'DHCP DEL' self.cleanup() @staticmethod def exists(cmd): return os.path.exists(cmd[0]) def start_dhcp(interface): for cmd in dhcp_clients: if dhcp_client.exists(cmd): return dhcp_client(cmd, interface) print 'Unable to locate DHCP Client' class BluezNetMonitor(object): def __init__(self, bus, bd_path, main_loop, reconnect = None): self.bus = bus self.bd_path = bd_path self.main_loop = main_loop self.reconnect = reconnect self.props = { 'Connected': 0, 'Interface': '', 'UUID': '', } self.Connected = property(lambda: self.props['Connected'], lambda x: self.props.__setitem__('Connected', x)) self.Interface = property(lambda: self.props['Interface'], lambda x: self.props.__setitem__('Interface', x)) self.dhcp = None self.is_up = False self.dev_proxy = self.bus.get_object('org.bluez', self.bd_path) self.dev_network = dbus.Interface(self.dev_proxy, BLUEZ_NETWORK_IFACE) # More Bluez 4/5 differences: bus.add_signal_receiver(self.property_changed_callback, 'PropertyChanged', BLUEZ_NETWORK_IFACE, None, bd_path) bus.add_signal_receiver(self.properties_changed_callback_bluez5, 'PropertiesChanged', 'org.freedesktop.DBus.Properties', None, bd_path) self.connect() def property_changed_callback(self, prop, val): print 'Property Changed: %s: %s' % (prop, val) if prop == 'Interface' and self.is_up and val != self.Interface: self.down() self.props[prop] = val if prop == 'Connected': val and self.up() val or self.down() def properties_changed_callback_bluez5(self, interface, objects, signature): print 'Properties Changed (Bluez 5): %s: %s (%s)' % (interface, objects, signature) if interface != BLUEZ_NETWORK_IFACE: return if 'Interface' in objects: if self.is_up and objects['Interface'] != self.Interface: self.down() self.props['Interface'] = objects['Interface'] if 'Connected' in objects: self.props['Connected'] = objects['Connected'] objects['Connected'] and self.up() objects['Connected'] or self.down() def up(self): if self.is_up: print 'ALREADY UP' return print 'UP' self.is_up = True assert(self.dhcp is None) self.dhcp = start_dhcp(self.Interface) def down(self): if self.is_up == False: print 'ALREADY DOWN' return print 'DOWN' self.is_up = False if self.dhcp is not None: self.dhcp.cleanup() self.dhcp = None self.no_connection() def no_connection(self): if self.reconnect is None: print 'Auto reconnect disabled, quitting...' self.main_loop.quit() else: print 'Will retry every %d seconds' % self.reconnect glib.timeout_add(self.reconnect * 1000, self._connect) def _connect(self): print 'Connecting...' try: self.Interface = self.dev_network.Connect('NAP') # 'GN' / 'NAP' ? print '%s created' % self.Interface return 0 except dbus.exceptions.DBusException as e: print 'Error Connecting: %s' % e if self.reconnect is None: raise return 1 def connect(self): ret = self._connect() if ret: self.no_connection() return ret def disconnect(self): print 'Disconnecting...' 
self.dev_network.Disconnect() class AdapterNotFound(Exception): pass def fucking_bluez_devs_hate_backwards_compatibility_what_bloody_arseholes(bus, adapter=None): global BLUEZ_NETWORK_IFACE bluez_proxy = bus.get_object('org.bluez', '/') try: # the old method that worked in Bluez 4 (of course, the bluez 3 method was broken years ago): bluez_manager = dbus.Interface(bluez_proxy, 'org.bluez.Manager') if adapter is None: return bluez_manager.DefaultAdapter() return bluez_manager.FindAdapter(adapter) except dbus.exceptions.DBusException: print('Bluez developers SUCK!') BLUEZ_NETWORK_IFACE = 'org.bluez.Network1' # There's probably a more direct way to get this info built into the Python # DBUS API... manager = dbus.Interface(bluez_proxy, 'org.freedesktop.DBus.ObjectManager') objects = manager.GetManagedObjects() pattern = re.compile(r'/org/bluez/[^/]+$') adapters = filter(lambda x: pattern.match(x), objects.keys()) if adapter is None: return sorted(adapters)[0] for path in adapters: proxy = bus.get_object('org.bluez', path) # Probably a more direct way to do this too... properties = dbus.Interface(proxy, dbus.PROPERTIES_IFACE) address = properties.Get('org.bluez.Adapter1', 'Address') if address.lower() == adapter.lower(): return path raise AdapterNotFound(adapter) def main(): opts = get_config() #dbus.glib.threads_init() #glib.theads_init() bus_loop = DBusGMainLoop(set_as_default = True) main_loop = glib.MainLoop() bus = dbus.SystemBus() # FIXME: BROKEN IN BLUEZ 5!!!! adapter = fucking_bluez_devs_hate_backwards_compatibility_what_bloody_arseholes(bus, opts.adapter) bd_path = '%s/dev_%s' % (adapter, opts.bdaddr.upper().replace(':', '_')) def input_callback(*args): # print 'INPUT: %s' % repr(args) main_loop.quit() return True # What return value signifies what? glib.io_add_watch(sys.stdin, glib.IO_IN, input_callback) try: bluez_net_monitor = BluezNetMonitor(bus, bd_path, main_loop, opts.reconnect) except dbus.exceptions.DBusException as e: return 1 print 'Press enter to close connection' main_loop.run() # Bluez 5 now seem to require manual disconnection... probably for the best, # but an incompatible change none the less: bluez_net_monitor.disconnect() if __name__ == '__main__': main() # vim:expandtab:ts=2:sw=2
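# ---------------------------------------------------------------------------
# Hedged sketch addressing the TODO above ("introspect which is in use
# instead"); not tested against real hardware. Instead of guessing
# BLUEZ_NETWORK_IFACE and waiting for a DBusException, one could introspect
# the device object and pick whichever interface name BlueZ actually exposes.
# Uses only the standard org.freedesktop.DBus.Introspectable interface from
# dbus-python (imported as `dbus` at the top of this script); the helper name
# detect_network_iface is an assumption, not part of the script above, and it
# would have to be defined before main() to be used there.
# ---------------------------------------------------------------------------

def detect_network_iface(bus, bd_path):
  """Return 'org.bluez.Network1' (BlueZ 5) or 'org.bluez.Network' (BlueZ 4)."""
  proxy = bus.get_object('org.bluez', bd_path)
  introspectable = dbus.Interface(proxy, 'org.freedesktop.DBus.Introspectable')
  xml = introspectable.Introspect()
  if 'org.bluez.Network1' in xml:
    return 'org.bluez.Network1'
  if 'org.bluez.Network' in xml:
    return 'org.bluez.Network'
  raise RuntimeError('No PAN network interface found on %s' % bd_path)

# Possible usage inside main(), before creating BluezNetMonitor:
#   global BLUEZ_NETWORK_IFACE
#   BLUEZ_NETWORK_IFACE = detect_network_iface(bus, bd_path)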
# -*- coding: utf-8 -*- """ Unit tests for the spectral module. :copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`. :license: Modified BSD, see LICENSE.txt for details. """ import unittest import numpy as np import scipy.signal as spsig import quantities as pq import neo.core as n from numpy.testing import assert_array_almost_equal, assert_array_equal import elephant.spectral class WelchPSDTestCase(unittest.TestCase): def test_welch_psd_errors(self): # generate a dummy data data = n.AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s, units='mV') # check for invalid parameter values # - length of segments self.assertRaises(ValueError, elephant.spectral.welch_psd, data, len_seg=0) self.assertRaises(ValueError, elephant.spectral.welch_psd, data, len_seg=data.shape[0] * 2) # - number of segments self.assertRaises(ValueError, elephant.spectral.welch_psd, data, num_seg=0) self.assertRaises(ValueError, elephant.spectral.welch_psd, data, num_seg=data.shape[0] * 2) # - frequency resolution self.assertRaises(ValueError, elephant.spectral.welch_psd, data, freq_res=-1) self.assertRaises(ValueError, elephant.spectral.welch_psd, data, freq_res=data.sampling_rate / (data.shape[0] + 1)) # - overlap self.assertRaises(ValueError, elephant.spectral.welch_psd, data, overlap=-1.0) self.assertRaises(ValueError, elephant.spectral.welch_psd, data, overlap=1.1) def test_welch_psd_behavior(self): # generate data by adding white noise and a sinusoid data_length = 5000 sampling_period = 0.001 signal_freq = 100.0 noise = np.random.normal(size=data_length) signal = [np.sin(2 * np.pi * signal_freq * t) for t in np.arange(0, data_length * sampling_period, sampling_period)] data = n.AnalogSignal(np.array(signal + noise), sampling_period=sampling_period * pq.s, units='mV') # consistency between different ways of specifying segment length freqs1, psd1 = elephant.spectral.welch_psd( data, len_segment=data_length // 5, overlap=0) freqs2, psd2 = elephant.spectral.welch_psd( data, n_segments=5, overlap=0) self.assertTrue((psd1 == psd2).all() and (freqs1 == freqs2).all()) # frequency resolution and consistency with data freq_res = 1.0 * pq.Hz freqs, psd = elephant.spectral.welch_psd( data, frequency_resolution=freq_res) self.assertAlmostEqual(freq_res, freqs[1] - freqs[0]) self.assertEqual(freqs[psd.argmax()], signal_freq) freqs_np, psd_np = elephant.spectral.welch_psd( data.magnitude.flatten(), fs=1 / sampling_period, frequency_resolution=freq_res) self.assertTrue((freqs == freqs_np).all() and (psd == psd_np).all()) # check of scipy.signal.welch() parameters params = {'window': 'hamming', 'nfft': 1024, 'detrend': 'linear', 'return_onesided': False, 'scaling': 'spectrum'} for key, val in params.items(): freqs, psd = elephant.spectral.welch_psd( data, len_segment=1000, overlap=0, **{key: val}) freqs_spsig, psd_spsig = spsig.welch(np.rollaxis(data, 0, len( data.shape)), fs=1 / sampling_period, nperseg=1000, noverlap=0, **{key: val}) self.assertTrue( (freqs == freqs_spsig).all() and ( psd == psd_spsig).all()) # - generate multidimensional data for check of parameter `axis` num_channel = 4 data_length = 5000 data_multidim = np.random.normal(size=(num_channel, data_length)) freqs, psd = elephant.spectral.welch_psd(data_multidim) freqs_T, psd_T = elephant.spectral.welch_psd(data_multidim.T, axis=0) self.assertTrue(np.all(freqs == freqs_T)) self.assertTrue(np.all(psd == psd_T.T)) def test_welch_psd_input_types(self): # generate a test data sampling_period = 0.001 data = 
n.AnalogSignal(np.array(np.random.normal(size=5000)), sampling_period=sampling_period * pq.s, units='mV') # outputs from AnalogSignal input are of Quantity type (standard usage) freqs_neo, psd_neo = elephant.spectral.welch_psd(data) self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity)) self.assertTrue(isinstance(psd_neo, pq.quantity.Quantity)) # outputs from Quantity array input are of Quantity type freqs_pq, psd_pq = elephant.spectral.welch_psd( data.magnitude.flatten() * data.units, fs=1 / sampling_period) self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity)) self.assertTrue(isinstance(psd_pq, pq.quantity.Quantity)) # outputs from Numpy ndarray input are NOT of Quantity type freqs_np, psd_np = elephant.spectral.welch_psd( data.magnitude.flatten(), fs=1 / sampling_period) self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity)) self.assertFalse(isinstance(psd_np, pq.quantity.Quantity)) # check if the results from different input types are identical self.assertTrue( (freqs_neo == freqs_pq).all() and ( psd_neo == psd_pq).all()) self.assertTrue( (freqs_neo == freqs_np).all() and ( psd_neo == psd_np).all()) def test_welch_psd_multidim_input(self): # generate multidimensional data num_channel = 4 data_length = 5000 sampling_period = 0.001 noise = np.random.normal(size=(num_channel, data_length)) data_np = np.array(noise) # Since row-column order in AnalogSignal is different from the # conventional one, `data_np` needs to be transposed when its used to # define an AnalogSignal data_neo = n.AnalogSignal(data_np.T, sampling_period=sampling_period * pq.s, units='mV') data_neo_1dim = n.AnalogSignal(data_np[0], sampling_period=sampling_period * pq.s, units='mV') # check if the results from different input types are identical freqs_np, psd_np = elephant.spectral.welch_psd(data_np, fs=1 / sampling_period) freqs_neo, psd_neo = elephant.spectral.welch_psd(data_neo) freqs_neo_1dim, psd_neo_1dim = elephant.spectral.welch_psd( data_neo_1dim) self.assertTrue(np.all(freqs_np == freqs_neo)) self.assertTrue(np.all(psd_np == psd_neo)) self.assertTrue(np.all(psd_neo_1dim == psd_neo[0])) class WelchCohereTestCase(unittest.TestCase): def test_welch_cohere_errors(self): # generate a dummy data x = n.AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s, units='mV') y = n.AnalogSignal(np.zeros(5000), sampling_period=0.001 * pq.s, units='mV') # check for invalid parameter values # - length of segments self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, len_seg=0) self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, len_seg=x.shape[0] * 2) # - number of segments self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, num_seg=0) self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, num_seg=x.shape[0] * 2) # - frequency resolution self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, freq_res=-1) self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, freq_res=x.sampling_rate / (x.shape[0] + 1)) # - overlap self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, overlap=-1.0) self.assertRaises(ValueError, elephant.spectral.welch_coherence, x, y, overlap=1.1) def test_welch_cohere_behavior(self): # generate data by adding white noise and a sinusoid data_length = 5000 sampling_period = 0.001 signal_freq = 100.0 noise1 = np.random.normal(size=data_length) * 0.01 noise2 = np.random.normal(size=data_length) * 0.01 signal1 = [np.cos(2 * np.pi * signal_freq * t) for t in np.arange(0, 
data_length * sampling_period, sampling_period)] signal2 = [np.sin(2 * np.pi * signal_freq * t) for t in np.arange(0, data_length * sampling_period, sampling_period)] x = n.AnalogSignal(np.array(signal1 + noise1), units='mV', sampling_period=sampling_period * pq.s) y = n.AnalogSignal(np.array(signal2 + noise2), units='mV', sampling_period=sampling_period * pq.s) # consistency between different ways of specifying segment length freqs1, coherency1, phase_lag1 = elephant.spectral.welch_coherence( x, y, len_segment=data_length // 5, overlap=0) freqs2, coherency2, phase_lag2 = elephant.spectral.welch_coherence( x, y, n_segments=5, overlap=0) self.assertTrue((coherency1 == coherency2).all() and (phase_lag1 == phase_lag2).all() and (freqs1 == freqs2).all()) # frequency resolution and consistency with data freq_res = 1.0 * pq.Hz freqs, coherency, phase_lag = elephant.spectral.welch_coherence( x, y, frequency_resolution=freq_res) self.assertAlmostEqual(freq_res, freqs[1] - freqs[0]) self.assertAlmostEqual(freqs[coherency.argmax()], signal_freq, places=2) self.assertAlmostEqual(phase_lag[coherency.argmax()], -np.pi / 2, places=2) freqs_np, coherency_np, phase_lag_np =\ elephant.spectral.welch_coherence(x.magnitude.flatten(), y.magnitude.flatten(), fs=1 / sampling_period, frequency_resolution=freq_res) assert_array_equal(freqs.simplified.magnitude, freqs_np) assert_array_equal(coherency[:, 0], coherency_np) assert_array_equal(phase_lag[:, 0], phase_lag_np) # - check the behavior of parameter `axis` using multidimensional data num_channel = 4 data_length = 5000 x_multidim = np.random.normal(size=(num_channel, data_length)) y_multidim = np.random.normal(size=(num_channel, data_length)) freqs, coherency, phase_lag =\ elephant.spectral.welch_coherence(x_multidim, y_multidim) freqs_T, coherency_T, phase_lag_T = elephant.spectral.welch_coherence( x_multidim.T, y_multidim.T, axis=0) assert_array_equal(freqs, freqs_T) assert_array_equal(coherency, coherency_T.T) assert_array_equal(phase_lag, phase_lag_T.T) def test_welch_cohere_input_types(self): # generate a test data sampling_period = 0.001 x = n.AnalogSignal(np.array(np.random.normal(size=5000)), sampling_period=sampling_period * pq.s, units='mV') y = n.AnalogSignal(np.array(np.random.normal(size=5000)), sampling_period=sampling_period * pq.s, units='mV') # outputs from AnalogSignal input are of Quantity type # (standard usage) freqs_neo, coherency_neo, phase_lag_neo =\ elephant.spectral.welch_coherence(x, y) self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity)) self.assertTrue(isinstance(phase_lag_neo, pq.quantity.Quantity)) # outputs from Quantity array input are of Quantity type freqs_pq, coherency_pq, phase_lag_pq = elephant.spectral\ .welch_coherence(x.magnitude.flatten() * x.units, y.magnitude.flatten() * y.units, fs=1 / sampling_period) self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity)) self.assertTrue(isinstance(phase_lag_pq, pq.quantity.Quantity)) # outputs from Numpy ndarray input are NOT of Quantity type freqs_np, coherency_np, phase_lag_np = elephant.spectral\ .welch_coherence(x.magnitude.flatten(), y.magnitude.flatten(), fs=1 / sampling_period) self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity)) self.assertFalse(isinstance(phase_lag_np, pq.quantity.Quantity)) # check if the results from different input types are identical self.assertTrue((freqs_neo == freqs_pq).all() and (coherency_neo[:, 0] == coherency_pq).all() and (phase_lag_neo[:, 0] == phase_lag_pq).all()) self.assertTrue((freqs_neo == freqs_np).all() and 
(coherency_neo[:, 0] == coherency_np).all() and (phase_lag_neo[:, 0] == phase_lag_np).all()) def test_welch_cohere_multidim_input(self): # generate multidimensional data num_channel = 4 data_length = 5000 sampling_period = 0.001 x_np = np.array(np.random.normal(size=(num_channel, data_length))) y_np = np.array(np.random.normal(size=(num_channel, data_length))) # Since row-column order in AnalogSignal is different from the # convention in NumPy/SciPy, `data_np` needs to be transposed when its # used to define an AnalogSignal x_neo = n.AnalogSignal(x_np.T, units='mV', sampling_period=sampling_period * pq.s) y_neo = n.AnalogSignal(y_np.T, units='mV', sampling_period=sampling_period * pq.s) x_neo_1dim = n.AnalogSignal(x_np[0], units='mV', sampling_period=sampling_period * pq.s) y_neo_1dim = n.AnalogSignal(y_np[0], units='mV', sampling_period=sampling_period * pq.s) # check if the results from different input types are identical freqs_np, coherency_np, phase_lag_np = elephant.spectral\ .welch_coherence(x_np, y_np, fs=1 / sampling_period) freqs_neo, coherency_neo, phase_lag_neo =\ elephant.spectral.welch_coherence(x_neo, y_neo) freqs_neo_1dim, coherency_neo_1dim, phase_lag_neo_1dim =\ elephant.spectral.welch_coherence(x_neo_1dim, y_neo_1dim) self.assertTrue(np.all(freqs_np == freqs_neo)) self.assertTrue(np.all(coherency_np.T == coherency_neo)) self.assertTrue(np.all(phase_lag_np.T == phase_lag_neo)) self.assertTrue( np.all(coherency_neo_1dim[:, 0] == coherency_neo[:, 0])) self.assertTrue( np.all(phase_lag_neo_1dim[:, 0] == phase_lag_neo[:, 0])) if __name__ == "__main__": unittest.main(verbosity=2)
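# ---------------------------------------------------------------------------
# Hedged illustration (pure NumPy/SciPy, independent of elephant): the
# frequency-resolution property checked in test_welch_psd_behavior above.
# Choosing nperseg = fs / freq_res makes the Welch frequency bins exactly
# freq_res apart, and the PSD peak lands on the sinusoid frequency. The
# helper name is illustrative only; it reuses the `np` and `spsig` imports
# at the top of this module and is never called by the test suite.
# ---------------------------------------------------------------------------

def _welch_resolution_demo():
    fs = 1000.0          # Hz, matches sampling_period = 0.001 s used above
    freq_res = 1.0       # Hz, desired frequency bin spacing
    signal_freq = 100.0  # Hz
    t = np.arange(0, 5.0, 1 / fs)
    x = np.sin(2 * np.pi * signal_freq * t) + np.random.normal(size=t.size)
    freqs, psd = spsig.welch(x, fs=fs, nperseg=int(fs / freq_res))
    # bin spacing equals the requested resolution ...
    assert np.isclose(freqs[1] - freqs[0], freq_res)
    # ... and the spectral peak sits on the sinusoid frequency
    assert np.isclose(freqs[np.argmax(psd)], signal_freq)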
# -*- coding: utf-8 -*- # Copyright (c) 2010-2017 Tuukka Turto # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Module for testing main configuration """ #pylint: disable=W0614 import herculeum.config.levels from hamcrest import assert_that, is_, not_none # pylint: disable-msg=E0611 from herculeum.config import Configuration from mockito import mock from pyherc.data import Model from pyherc.ports.inventory import InventoryParameters class TestMainConfiguration(): """ Tests for main configuration """ def __init__(self): """ Default constructor """ self.config = None def setup(self): """ Setup test case """ self.config = Configuration(Model(), herculeum.config.levels, mock(), mock()) self.config.initialise() def test_initialisation(self): """ Test that main configuration can be read and initialised properly Note: This test reads configuration from resources directory """ config = self.config assert_that(config.surface_manager, is_(not_none())) assert_that(config.action_factory, is_(not_none())) assert_that(config.item_generator, is_(not_none())) assert_that(config.creature_generator, is_(not_none())) assert_that(config.level_generator_factory, is_(not_none())) assert_that(config.level_size, is_(not_none())) assert_that(config.model, is_(not_none())) assert_that(config.rng, is_(not_none())) def test_first_gate_generator(self): """ Test that first gate level generator can be retrieved and used """ factory = self.config.level_generator_factory generator = factory.get_generator('first gate') level = generator(None) def test_upper_mines_generator(self): """ Test that upper mines level generator can be retrieved """ factory = self.config.level_generator_factory generator = factory.get_generator('upper mines') level = generator(None) def test_lower_mines_generator(self): """ Test that lower mines level can be created """ factory = self.config.level_generator_factory generator = factory.get_generator('lower mines') level = generator(None) def test_forge_generator(self): """ Test that forge can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('forge') level = generator(None) def test_lower_caverns_generator(self): """ Test that lower caverns can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('lower caverns') level = generator(None) def test_middle_caverns_generator(self): """ Test that middle caverns can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('middle 
caverns') level = generator(None) def test_upper_caverns_generator(self): """ Test that upper caverns can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('upper caverns') level = generator(None) def test_second_gate_generator(self): """ Test that second gate can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('second gate') level = generator(None) def test_lower_maze_generator(self): """ Test that lower maze can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('lower maze') level = generator(None) def test_courtyard_generator(self): """ Test that courtyard can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('courtyard') level = generator(None) def test_upper_maze_generator(self): """ Test that upper maze can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('upper maze') level = generator(None) def test_third_gate_generator(self): """ Test that third gate can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('third gate') level = generator(None) def test_lower_catacombs_generator(self): """ Test that lower catacombs can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('lower catacombs') level = generator(None) def test_central_catacombs_generator(self): """ Test that central catacombs can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('central catacombs') level = generator(None) def test_upper_catacombs_generator(self): """ Test that upper catacombs can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('upper catacombs') level = generator(None) def test_final_gate_generator(self): """ Test that final gate can be generated """ factory = self.config.level_generator_factory generator = factory.get_generator('final gate') level = generator(None) def test_inventory_factory_has_been_initialised(self): """ Test that inventory action factory has been initialised """ factory = self.config.action_factory.get_sub_factory( InventoryParameters(character = None, item = None, sub_action = 'pick up')) assert_that(factory, is_(not_none())) def test_player_character_generator_has_been_initialised(self): """ Test that player character generator is initialised during configuration phase """ assert_that(self.config.player_generator, is_(not_none()))
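# ---------------------------------------------------------------------------
# Hedged sketch: the per-level tests above all repeat the same
# get_generator(name) / generator(None) pattern, so they could be collapsed
# into one data-driven check. LEVEL_NAMES is collected from the tests in
# this module; the helper below is illustrative and, like the originals, only
# verifies that level generation does not raise.
# ---------------------------------------------------------------------------

LEVEL_NAMES = ['first gate', 'upper mines', 'lower mines', 'forge',
               'lower caverns', 'middle caverns', 'upper caverns',
               'second gate', 'lower maze', 'courtyard', 'upper maze',
               'third gate', 'lower catacombs', 'central catacombs',
               'upper catacombs', 'final gate']


def check_all_level_generators(config):
    """Instantiate every known level generator once, as the tests above do."""
    factory = config.level_generator_factory
    for name in LEVEL_NAMES:
        generator = factory.get_generator(name)
        generator(None)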