Dataset schema (one record per source file in the dump below):
  repo_name: string, length 5 to 100
  path: string, length 4 to 375
  copies: string, 991 distinct values
  size: string, length 4 to 7
  content: string, length 666 to 1M
  license: string, 15 distinct values
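As a minimal sketch of working with records of this shape (assuming the dump has already been parsed into a list of Python dicts keyed by the column names above; the name `records` and the loading step are hypothetical and not part of this dump), records could be filtered like this:

# Sketch only: filter already-parsed records by license and file extension.
# `records` is an assumed name for the parsed dump; it is not defined in this document.
def select_python_files(records, allowed_licenses=("mit", "bsd-3-clause")):
    return [
        r for r in records
        if r["license"] in allowed_licenses and r["path"].endswith(".py")
    ]

# Hypothetical usage once `records` is available:
# mit_only = select_python_files(records, allowed_licenses=("mit",))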
repo_name: FreeAgent/djangoappengine-starter
path: django/contrib/admindocs/views.py
copies: 296
size: 15504
content:
from django import template, templatetags from django.template import RequestContext from django.conf import settings from django.contrib.admin.views.decorators import staff_member_required from django.db import models from django.shortcuts import render_to_response from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.http import Http404 from django.core import urlresolvers from django.contrib.admindocs import utils from django.contrib.sites.models import Site from django.utils.importlib import import_module from django.utils.translation import ugettext as _ from django.utils.safestring import mark_safe import inspect, os, re # Exclude methods starting with these strings from documentation MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_') class GenericSite(object): domain = 'example.com' name = 'my site' def get_root_path(): try: return urlresolvers.reverse('admin:index') except urlresolvers.NoReverseMatch: from django.contrib import admin try: return urlresolvers.reverse(admin.site.root, args=['']) except urlresolvers.NoReverseMatch: return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/") def doc_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) return render_to_response('admin_doc/index.html', { 'root_path': get_root_path(), }, context_instance=RequestContext(request)) doc_index = staff_member_required(doc_index) def bookmarklets(request): admin_root = get_root_path() return render_to_response('admin_doc/bookmarklets.html', { 'root_path': admin_root, 'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)), }, context_instance=RequestContext(request)) bookmarklets = staff_member_required(bookmarklets) def template_tag_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) load_all_installed_template_libraries() tags = [] app_libs = template.libraries.items() builtin_libs = [(None, lib) for lib in template.builtins] for module_name, library in builtin_libs + app_libs: for tag_name, tag_func in library.tags.items(): title, body, metadata = utils.parse_docstring(tag_func.__doc__) if title: title = utils.parse_rst(title, 'tag', _('tag:') + tag_name) if body: body = utils.parse_rst(body, 'tag', _('tag:') + tag_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name) if library in template.builtins: tag_library = None else: tag_library = module_name.split('.')[-1] tags.append({ 'name': tag_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) return render_to_response('admin_doc/template_tag_index.html', { 'root_path': get_root_path(), 'tags': tags }, context_instance=RequestContext(request)) template_tag_index = staff_member_required(template_tag_index) def template_filter_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) load_all_installed_template_libraries() filters = [] app_libs = template.libraries.items() builtin_libs = [(None, lib) for lib in template.builtins] for module_name, library in builtin_libs + app_libs: for filter_name, filter_func in library.filters.items(): title, body, metadata = utils.parse_docstring(filter_func.__doc__) if title: title = utils.parse_rst(title, 'filter', _('filter:') + filter_name) if body: body = utils.parse_rst(body, 'filter', _('filter:') + filter_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name) if 
library in template.builtins: tag_library = None else: tag_library = module_name.split('.')[-1] filters.append({ 'name': filter_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) return render_to_response('admin_doc/template_filter_index.html', { 'root_path': get_root_path(), 'filters': filters }, context_instance=RequestContext(request)) template_filter_index = staff_member_required(template_filter_index) def view_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) if settings.ADMIN_FOR: settings_modules = [import_module(m) for m in settings.ADMIN_FOR] else: settings_modules = [settings] views = [] for settings_mod in settings_modules: urlconf = import_module(settings_mod.ROOT_URLCONF) view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns) if Site._meta.installed: site_obj = Site.objects.get(pk=settings_mod.SITE_ID) else: site_obj = GenericSite() for (func, regex) in view_functions: views.append({ 'name': getattr(func, '__name__', func.__class__.__name__), 'module': func.__module__, 'site_id': settings_mod.SITE_ID, 'site': site_obj, 'url': simplify_regex(regex), }) return render_to_response('admin_doc/view_index.html', { 'root_path': get_root_path(), 'views': views }, context_instance=RequestContext(request)) view_index = staff_member_required(view_index) def view_detail(request, view): if not utils.docutils_is_available: return missing_docutils_page(request) mod, func = urlresolvers.get_mod_func(view) try: view_func = getattr(import_module(mod), func) except (ImportError, AttributeError): raise Http404 title, body, metadata = utils.parse_docstring(view_func.__doc__) if title: title = utils.parse_rst(title, 'view', _('view:') + view) if body: body = utils.parse_rst(body, 'view', _('view:') + view) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view) return render_to_response('admin_doc/view_detail.html', { 'root_path': get_root_path(), 'name': view, 'summary': title, 'body': body, 'meta': metadata, }, context_instance=RequestContext(request)) view_detail = staff_member_required(view_detail) def model_index(request): if not utils.docutils_is_available: return missing_docutils_page(request) m_list = [m._meta for m in models.get_models()] return render_to_response('admin_doc/model_index.html', { 'root_path': get_root_path(), 'models': m_list }, context_instance=RequestContext(request)) model_index = staff_member_required(model_index) def model_detail(request, app_label, model_name): if not utils.docutils_is_available: return missing_docutils_page(request) # Get the model class. try: app_mod = models.get_app(app_label) except ImproperlyConfigured: raise Http404(_("App %r not found") % app_label) model = None for m in models.get_models(app_mod): if m._meta.object_name.lower() == model_name: model = m break if model is None: raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label}) opts = model._meta # Gather fields/field descriptions. 
fields = [] for field in opts.fields: # ForeignKey is a special case since the field will actually be a # descriptor that returns the other object if isinstance(field, models.ForeignKey): data_type = related_object_name = field.rel.to.__name__ app_label = field.rel.to._meta.app_label verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type) else: data_type = get_readable_field_data_type(field) verbose = field.verbose_name fields.append({ 'name': field.name, 'data_type': data_type, 'verbose': verbose, 'help_text': field.help_text, }) # Gather many-to-many fields. for field in opts.many_to_many: data_type = related_object_name = field.rel.to.__name__ app_label = field.rel.to._meta.app_label verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type} fields.append({ 'name': "%s.all" % field.name, "data_type": 'List', 'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name), }) fields.append({ 'name' : "%s.count" % field.name, 'data_type' : 'Integer', 'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name), }) # Gather model methods. for func_name, func in model.__dict__.items(): if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1): try: for exclude in MODEL_METHODS_EXCLUDE: if func_name.startswith(exclude): raise StopIteration except StopIteration: continue verbose = func.__doc__ if verbose: verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name) fields.append({ 'name': func_name, 'data_type': get_return_data_type(func_name), 'verbose': verbose, }) # Gather related objects for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects(): verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name} accessor = rel.get_accessor_name() fields.append({ 'name' : "%s.all" % accessor, 'data_type' : 'List', 'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name), }) fields.append({ 'name' : "%s.count" % accessor, 'data_type' : 'Integer', 'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name), }) return render_to_response('admin_doc/model_detail.html', { 'root_path': get_root_path(), 'name': '%s.%s' % (opts.app_label, opts.object_name), 'summary': _("Fields on %s objects") % opts.object_name, 'description': model.__doc__, 'fields': fields, }, context_instance=RequestContext(request)) model_detail = staff_member_required(model_detail) def template_detail(request, template): templates = [] for site_settings_module in settings.ADMIN_FOR: settings_mod = import_module(site_settings_module) if Site._meta.installed: site_obj = Site.objects.get(pk=settings_mod.SITE_ID) else: site_obj = GenericSite() for dir in settings_mod.TEMPLATE_DIRS: template_file = os.path.join(dir, template) templates.append({ 'file': template_file, 'exists': os.path.exists(template_file), 'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '', 'site_id': settings_mod.SITE_ID, 'site': site_obj, 'order': list(settings_mod.TEMPLATE_DIRS).index(dir), }) return render_to_response('admin_doc/template_detail.html', { 'root_path': get_root_path(), 'name': template, 'templates': templates, }, context_instance=RequestContext(request)) 
template_detail = staff_member_required(template_detail) #################### # Helper functions # #################### def missing_docutils_page(request): """Display an error message for people without docutils""" return render_to_response('admin_doc/missing_docutils.html') def load_all_installed_template_libraries(): # Load/register all template tag libraries from installed apps. for module_name in template.get_templatetags_modules(): mod = import_module(module_name) libraries = [ os.path.splitext(p)[0] for p in os.listdir(os.path.dirname(mod.__file__)) if p.endswith('.py') and p[0].isalpha() ] for library_name in libraries: try: lib = template.get_library(library_name) except template.InvalidTemplateLibrary, e: pass def get_return_data_type(func_name): """Return a somewhat-helpful data type given a function name""" if func_name.startswith('get_'): if func_name.endswith('_list'): return 'List' elif func_name.endswith('_count'): return 'Integer' return '' def get_readable_field_data_type(field): """Returns the description for a given field type, if it exists, Fields' descriptions can contain format strings, which will be interpolated against the values of field.__dict__ before being output.""" return field.description % field.__dict__ def extract_views_from_urlpatterns(urlpatterns, base=''): """ Return a list of views from a list of urlpatterns. Each object in the returned list is a two-tuple: (view_func, regex) """ views = [] for p in urlpatterns: if hasattr(p, '_get_callback'): try: views.append((p._get_callback(), base + p.regex.pattern)) except ViewDoesNotExist: continue elif hasattr(p, '_get_url_patterns'): try: patterns = p.url_patterns except ImportError: continue views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern)) else: raise TypeError(_("%s does not appear to be a urlpattern object") % p) return views named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)') non_named_group_matcher = re.compile(r'\(.*?\)') def simplify_regex(pattern): """ Clean up urlpattern regexes into something somewhat readable by Mere Humans: turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$" into "<sport_slug>/athletes/<athlete_slug>/" """ # handle named groups first pattern = named_group_matcher.sub(lambda m: m.group(1), pattern) # handle non-named groups pattern = non_named_group_matcher.sub("<var>", pattern) # clean up any outstanding regex-y characters. pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '') if not pattern.startswith('/'): pattern = '/' + pattern return pattern
license: bsd-3-clause
repo_name: michaelconnor00/gbdxtools
path: gbdxtools/catalog_search_aoi.py
copies: 1
size: 3959
content:
""" GBDX Catalog Search Helper Functions. This set of functions is used for breaking up a large AOI into smaller AOIs to search, because the catalog API can only handle 2 square degrees at a time. """ from builtins import zip from builtins import range from pygeoif import geometry import json def point_in_poly(x,y,poly): n = len(poly) inside = False p1x,p1y = poly[0] for i in range(n+1): p2x,p2y = poly[i % n] if y > min(p1y,p2y): if y <= max(p1y,p2y): if x <= max(p1x,p2x): if p1y != p2y: xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x if p1x == p2x or x <= xints: inside = not inside p1x,p1y = p2x,p2y return inside # range() but for float steps def xfrange(start, stop, step): while start < stop: yield start start += step else: yield stop def dedup_records(records): # 0.5 seconds for 5k records #print "Records: %s" % len(records) ids = set( [r['identifier'] for r in records] ) #print "Ids: %s" % len(ids) deduped = [] for r in records: if r['identifier'] in ids: deduped.append(r) ids = ids - set( [ r['identifier'] ] ) #print "Deduped: %s" % len(deduped) return deduped def bbox_in_poly(bbox,poly): W, S, E, N = bbox.bounds points = [(W,N),(E,N),(E,S),(W,S)] for p in points: if point_in_poly(p[0],p[1], poly.exterior.coords ): return True def records_in_polygon(records,polygon): # Filter out the records that are not inside the polygon output_records = [] for record in records: recordwkt = record['properties']['footprintWkt'] record_polygon = geometry.from_wkt(recordwkt) if bbox_in_poly(record_polygon,polygon): output_records.append(record) #print "Filtered in polygon: %s" % len(output_records) return output_records def polygon_from_bounds( bounds ): W, S, E, N = bounds return geometry.Polygon( ( (W,N),(E,N),(E,S),(W,S),(W,N) ) ) def search_materials_in_multiple_small_searches(search_request, gbdx_connection, base_url): D = 1.4 # the size in degrees of the side of a square that we will search searchAreaWkt = search_request['searchAreaWkt'] searchAreaPolygon = geometry.from_wkt(searchAreaWkt) W, S, E, N = searchAreaPolygon.bounds Ys = [i for i in xfrange(S,N,D)] Xs = [i for i in xfrange(W,E,D)] # Handle point searches: if W == E and N == S: Ys = [S, N] Xs = [W, E] # print Xs # print Ys # print searchAreaWkt records = [] # Loop pairwise row = 0 col = 0 for y, y1 in zip(Ys, Ys[1:]): row = row + 1 for x, x1 in zip(Xs, Xs[1:]): col = col + 1 bbox = (x, y, x1, y1) subsearchpoly = polygon_from_bounds(bbox) # # verify that the subsearchpoly is inside the searchAreaPolygon. If not break. if not bbox_in_poly(subsearchpoly,searchAreaPolygon) and not bbox_in_poly(searchAreaPolygon, subsearchpoly) and not (y == y1 and x == x1): pass else: search_request['searchAreaWkt'] = subsearchpoly.wkt url = '%(base_url)s/search?includeRelationships=false' % { 'base_url': base_url } headers = {'Content-Type':'application/json'} r = gbdx_connection.post(url, headers=headers, data=json.dumps(search_request)) r.raise_for_status() records = records + r.json()['results'] records = dedup_records(records) # this next line works, but filters too much stuff. It removes some items intersecting the polygon. #records = records_in_polygon(records, searchAreaPolygon) # this takes quite a while to run, so leave it commented return records
license: mit
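For reference, a minimal sketch exercising the ray-casting helper from the file in the record above; the coordinates are hypothetical, and it assumes the package from this record (gbdxtools.catalog_search_aoi) is importable in your environment.

# Illustrative only: point_in_poly is the helper defined in the record above.
from gbdxtools.catalog_search_aoi import point_in_poly

square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]  # a unit square
print(point_in_poly(0.5, 0.5, square))  # point inside the square: True
print(point_in_poly(2.0, 2.0, square))  # point outside the square: False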
repo_name: leighpauls/k2cro4
path: testing/gtest/test/gtest_filter_unittest.py
copies: 2826
size: 21261
content:
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. """ __author__ = '[email protected] (Zhanyong Wan)' import os import re import sets import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. 
This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' 
+ test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. 
However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" 
self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
license: bsd-3-clause
repo_name: jjscarafia/odoo
path: addons/base_import/tests/test_cases.py
copies: 84
size: 13383
content:
# -*- encoding: utf-8 -*- import unittest2 from openerp.tests.common import TransactionCase from .. import models ID_FIELD = { 'id': 'id', 'name': 'id', 'string': "External ID", 'required': False, 'fields': [], } def make_field(name='value', string='unknown', required=False, fields=[]): return [ ID_FIELD, {'id': name, 'name': name, 'string': string, 'required': required, 'fields': fields}, ] def sorted_fields(fields): """ recursively sort field lists to ease comparison """ recursed = [dict(field, fields=sorted_fields(field['fields'])) for field in fields] return sorted(recursed, key=lambda field: field['id']) class BaseImportCase(TransactionCase): def assertEqualFields(self, fields1, fields2): self.assertEqual(sorted_fields(fields1), sorted_fields(fields2)) class test_basic_fields(BaseImportCase): def get_fields(self, field): return self.registry('base_import.import')\ .get_fields(self.cr, self.uid, 'base_import.tests.models.' + field) def test_base(self): """ A basic field is not required """ self.assertEqualFields(self.get_fields('char'), make_field()) def test_required(self): """ Required fields should be flagged (so they can be fill-required) """ self.assertEqualFields(self.get_fields('char.required'), make_field(required=True)) def test_readonly(self): """ Readonly fields should be filtered out""" self.assertEqualFields(self.get_fields('char.readonly'), [ID_FIELD]) def test_readonly_states(self): """ Readonly fields with states should not be filtered out""" self.assertEqualFields(self.get_fields('char.states'), make_field()) def test_readonly_states_noreadonly(self): """ Readonly fields with states having nothing to do with readonly should still be filtered out""" self.assertEqualFields(self.get_fields('char.noreadonly'), [ID_FIELD]) def test_readonly_states_stillreadonly(self): """ Readonly fields with readonly states leaving them readonly always... filtered out""" self.assertEqualFields(self.get_fields('char.stillreadonly'), [ID_FIELD]) def test_m2o(self): """ M2O fields should allow import of themselves (name_get), their id and their xid""" self.assertEqualFields(self.get_fields('m2o'), make_field(fields=[ {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []}, {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []}, ])) def test_m2o_required(self): """ If an m2o field is required, its three sub-fields are required as well (the client has to handle that: requiredness is id-based) """ self.assertEqualFields(self.get_fields('m2o.required'), make_field(required=True, fields=[ {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []}, {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []}, ])) class test_o2m(BaseImportCase): def get_fields(self, field): return self.registry('base_import.import')\ .get_fields(self.cr, self.uid, 'base_import.tests.models.' + field) def test_shallow(self): self.assertEqualFields(self.get_fields('o2m'), make_field(fields=[ ID_FIELD, # FIXME: should reverse field be ignored? 
{'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [ {'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []}, {'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []}, ]}, {'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []}, ])) class test_match_headers_single(TransactionCase): def test_match_by_name(self): match = self.registry('base_import.import')._match_header( 'f0', [{'name': 'f0'}], {}) self.assertEqual(match, [{'name': 'f0'}]) def test_match_by_string(self): match = self.registry('base_import.import')._match_header( 'some field', [{'name': 'bob', 'string': "Some Field"}], {}) self.assertEqual(match, [{'name': 'bob', 'string': "Some Field"}]) def test_nomatch(self): match = self.registry('base_import.import')._match_header( 'should not be', [{'name': 'bob', 'string': "wheee"}], {}) self.assertEqual(match, []) def test_recursive_match(self): f = { 'name': 'f0', 'string': "My Field", 'fields': [ {'name': 'f0', 'string': "Sub field 0", 'fields': []}, {'name': 'f1', 'string': "Sub field 2", 'fields': []}, ] } match = self.registry('base_import.import')._match_header( 'f0/f1', [f], {}) self.assertEqual(match, [f, f['fields'][1]]) def test_recursive_nomatch(self): """ Match first level, fail to match second level """ f = { 'name': 'f0', 'string': "My Field", 'fields': [ {'name': 'f0', 'string': "Sub field 0", 'fields': []}, {'name': 'f1', 'string': "Sub field 2", 'fields': []}, ] } match = self.registry('base_import.import')._match_header( 'f0/f2', [f], {}) self.assertEqual(match, []) class test_match_headers_multiple(TransactionCase): def test_noheaders(self): self.assertEqual( self.registry('base_import.import')._match_headers( [], [], {}), (None, None) ) def test_nomatch(self): self.assertEqual( self.registry('base_import.import')._match_headers( iter([ ['foo', 'bar', 'baz', 'qux'], ['v1', 'v2', 'v3', 'v4'], ]), [], {'headers': True}), ( ['foo', 'bar', 'baz', 'qux'], dict.fromkeys(range(4)) ) ) def test_mixed(self): self.assertEqual( self.registry('base_import.import')._match_headers( iter(['foo bar baz qux/corge'.split()]), [ {'name': 'bar', 'string': 'Bar'}, {'name': 'bob', 'string': 'Baz'}, {'name': 'qux', 'string': 'Qux', 'fields': [ {'name': 'corge', 'fields': []}, ]} ], {'headers': True}), (['foo', 'bar', 'baz', 'qux/corge'], { 0: None, 1: ['bar'], 2: ['bob'], 3: ['qux', 'corge'], }) ) class test_preview(TransactionCase): def make_import(self): Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'res.users', 'file': u"로그인,언어\nbob,1\n".encode('euc_kr'), }) return Import, id def test_encoding(self): Import, id = self.make_import() result = Import.parse_preview(self.cr, self.uid, id, { 'quoting': '"', 'separator': ',', }) self.assertTrue('error' in result) def test_csv_errors(self): Import, id = self.make_import() result = Import.parse_preview(self.cr, self.uid, id, { 'quoting': 'foo', 'separator': ',', 'encoding': 'euc_kr', }) self.assertTrue('error' in result) def test_csv_errors(self): Import, id = self.make_import() result = Import.parse_preview(self.cr, self.uid, id, { 'quoting': '"', 'separator': 'bob', 'encoding': 'euc_kr', }) self.assertTrue('error' in result) def test_success(self): Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'base_import.tests.models.preview', 'file': 'name,Some Value,Counter\n' 'foo,1,2\n' 'bar,3,4\n' 'qux,5,6\n' 
}) result = Import.parse_preview(self.cr, self.uid, id, { 'quoting': '"', 'separator': ',', 'headers': True, }) self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None}) self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter']) # Order depends on iteration order of fields_get self.assertItemsEqual(result['fields'], [ ID_FIELD, {'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []}, {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []}, {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []}, ]) self.assertEqual(result['preview'], [ ['foo', '1', '2'], ['bar', '3', '4'], ['qux', '5', '6'], ]) # Ensure we only have the response fields we expect self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview']) class test_convert_import_data(TransactionCase): """ Tests conversion of base_import.import input into data which can be fed to Model.import_data """ def test_all(self): Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'base_import.tests.models.preview', 'file': 'name,Some Value,Counter\n' 'foo,1,2\n' 'bar,3,4\n' 'qux,5,6\n' }) record = Import.browse(self.cr, self.uid, id) data, fields = Import._convert_import_data( record, ['name', 'somevalue', 'othervalue'], {'quoting': '"', 'separator': ',', 'headers': True,}) self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue']) self.assertItemsEqual(data, [ ('foo', '1', '2'), ('bar', '3', '4'), ('qux', '5', '6'), ]) def test_filtered(self): """ If ``False`` is provided as field mapping for a column, that column should be removed from importable data """ Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'base_import.tests.models.preview', 'file': 'name,Some Value,Counter\n' 'foo,1,2\n' 'bar,3,4\n' 'qux,5,6\n' }) record = Import.browse(self.cr, self.uid, id) data, fields = Import._convert_import_data( record, ['name', False, 'othervalue'], {'quoting': '"', 'separator': ',', 'headers': True,}) self.assertItemsEqual(fields, ['name', 'othervalue']) self.assertItemsEqual(data, [ ('foo', '2'), ('bar', '4'), ('qux', '6'), ]) def test_norow(self): """ If a row is composed only of empty values (due to having filtered out non-empty values from it), it should be removed """ Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'base_import.tests.models.preview', 'file': 'name,Some Value,Counter\n' 'foo,1,2\n' ',3,\n' ',5,6\n' }) record = Import.browse(self.cr, self.uid, id) data, fields = Import._convert_import_data( record, ['name', False, 'othervalue'], {'quoting': '"', 'separator': ',', 'headers': True,}) self.assertItemsEqual(fields, ['name', 'othervalue']) self.assertItemsEqual(data, [ ('foo', '2'), ('', '6'), ]) def test_nofield(self): Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'base_import.tests.models.preview', 'file': 'name,Some Value,Counter\n' 'foo,1,2\n' }) record = Import.browse(self.cr, self.uid, id) self.assertRaises( ValueError, Import._convert_import_data, record, [], {'quoting': '"', 'separator': ',', 'headers': True,}) def test_falsefields(self): Import = self.registry('base_import.import') id = Import.create(self.cr, self.uid, { 'res_model': 'base_import.tests.models.preview', 'file': 'name,Some Value,Counter\n' 'foo,1,2\n' }) record = Import.browse(self.cr, self.uid, id) 
self.assertRaises( ValueError, Import._convert_import_data, record, [False, False, False], {'quoting': '"', 'separator': ',', 'headers': True,})
license: agpl-3.0
repo_name: shimpe/pyvectortween
path: vectortween/SequentialAnimation.py
copies: 1
size: 2872
content:
from copy import deepcopy

import numpy as np

from vectortween.Animation import Animation
from vectortween.Mapping import Mapping
from vectortween.Tween import Tween


def normalize(x):
    return x / sum(x)


class SequentialAnimation(Animation):
    def __init__(self, list_of_animations=None, timeweight=None, repeats=None, tween=None):
        super().__init__(None, None)
        if tween is None:
            tween = ['linear']
        if timeweight is None:
            timeweight = []
        if list_of_animations is None:
            list_of_animations = []
        if repeats is None:
            repeats = 1
        self.ListOfAnimations = []
        self.ListOfAnimationTimeWeight = np.array([])
        self.CumulativeNormalizedTimeWeights = np.array([])
        self.T = Tween(*tween)
        if list_of_animations:
            for r in range(repeats):
                if not timeweight:
                    for a in list_of_animations:
                        self.add(a, 1)
                else:
                    for a, t in zip(list_of_animations, timeweight):
                        self.add(a, t)

    def add(self, anim, timeweight=1):
        self.ListOfAnimations.append(deepcopy(anim))
        self.ListOfAnimationTimeWeight = np.append(self.ListOfAnimationTimeWeight, [timeweight])
        self.CumulativeNormalizedTimeWeights = np.cumsum(normalize(self.ListOfAnimationTimeWeight))

    def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
        if birthframe is None:
            birthframe = startframe
        if deathframe is None:
            deathframe = stopframe
        if frame < birthframe:
            return None
        if frame > deathframe:
            return None
        if frame < startframe:
            return self.ListOfAnimations[0].make_frame(frame, birthframe, startframe, stopframe,
                                                       deathframe, noiseframe)
        if frame > stopframe:
            return self.ListOfAnimations[-1].make_frame(frame, birthframe, startframe, stopframe,
                                                        deathframe, noiseframe)
        t = self.T.tween2(frame, startframe, stopframe)
        if t is None:
            return None
        for i, w in enumerate(self.CumulativeNormalizedTimeWeights):
            if t <= w:
                if i == 0:  # reached the end of the cumulative weights
                    relativestartframe = 0
                else:
                    relativestartframe = self.CumulativeNormalizedTimeWeights[i - 1]
                relativestopframe = self.CumulativeNormalizedTimeWeights[i]
                absstartframe = Mapping.linlin(relativestartframe, 0, 1, startframe, stopframe)
                absstopframe = Mapping.linlin(relativestopframe, 0, 1, startframe, stopframe)
                return self.ListOfAnimations[i].make_frame(frame, birthframe, absstartframe,
                                                           absstopframe, deathframe, noiseframe)
license: mit
repo_name: spaceof7/QGIS
path: python/plugins/MetaSearch/pavement.py
copies: 67
size: 7402
content:
# -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2014 Tom Kralidis ([email protected]) # # This source is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # This code is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # ############################################################################### from configparser import ConfigParser import getpass import os import shutil import xml.etree.ElementTree as etree import xmlrpc.client import zipfile from paver.easy import (call_task, cmdopts, error, info, options, path, sh, task, Bunch) from owslib.csw import CatalogueServiceWeb # spellok PLUGIN_NAME = 'MetaSearch' BASEDIR = os.path.abspath(os.path.dirname(__file__)) USERDIR = os.path.expanduser('~') with open('metadata.txt') as mf: cp = ConfigParser() cp.readfp(mf) VERSION = cp.get('general', 'version') options( base=Bunch( home=BASEDIR, plugin=path(BASEDIR), ui=path(BASEDIR) / 'plugin' / PLUGIN_NAME / 'ui', install=path('%s/.qgis3/python/plugins/MetaSearch' % USERDIR), ext_libs=path('plugin/MetaSearch/ext-libs'), tmp=path(path('%s/MetaSearch-dist' % USERDIR)), version=VERSION ), upload=Bunch( host='plugins.qgis.org', port=80, endpoint='plugins/RPC2/' ) ) @task def clean(): """clean environment""" if os.path.exists(options.base.install): if os.path.islink(options.base.install): os.unlink(options.base.install) else: shutil.rmtree(options.base.install) if os.path.exists(options.base.tmp): shutil.rmtree(options.base.tmp) if os.path.exists(options.base.ext_libs): shutil.rmtree(options.base.ext_libs) for ui_file in os.listdir(options.base.ui): if ui_file.endswith('.py') and ui_file != '__init__.py': os.remove(options.base.plugin / 'ui' / ui_file) os.remove(path(options.base.home) / '%s.pro' % PLUGIN_NAME) sh('git clean -dxf') @task def install(): """install plugin into user QGIS environment""" plugins_dir = path(USERDIR) / '.qgis3/python/plugins' if os.path.exists(options.base.install): if os.path.islink(options.base.install): os.unlink(options.base.install) else: shutil.rmtree(options.base.install) if not os.path.exists(plugins_dir): raise OSError('The directory %s does not exist.' 
% plugins_dir) if not hasattr(os, 'symlink'): shutil.copytree(options.base.plugin, options.base.install) elif not os.path.exists(options.base.install): os.symlink(options.base.plugin, options.base.install) @task def package(): """create zip file of plugin""" skip_files = [ 'AUTHORS.txt', 'CMakeLists.txt', 'requirements.txt', 'requirements-dev.txt', 'pavement.txt' ] package_file = get_package_filename() if not os.path.exists(options.base.tmp): options.base.tmp.mkdir() if os.path.exists(package_file): os.unlink(package_file) with zipfile.ZipFile(package_file, 'w', zipfile.ZIP_DEFLATED) as zipf: for root, dirs, files in os.walk(options.base.plugin): for file_add in files: if file_add.endswith('.pyc') or file_add in skip_files: continue filepath = os.path.join(root, file_add) relpath = os.path.join(PLUGIN_NAME, os.path.relpath(filepath)) zipf.write(filepath, relpath) return package_file # return name of created zipfile @task @cmdopts([ ('user=', 'u', 'OSGeo userid'), ]) def upload(): """upload package zipfile to server""" user = options.get('user', False) if not user: raise ValueError('OSGeo userid required') password = getpass.getpass('Enter your password: ') if password.strip() == '': raise ValueError('password required') call_task('package') zipf = get_package_filename() url = 'http://%s:%s@%s:%d/%s' % (user, password, options.upload.host, options.upload.port, options.upload.endpoint) info('Uploading to http://%s/%s' % (options.upload.host, options.upload.endpoint)) server = xmlrpc.client.ServerProxy(url, verbose=False) try: with open(zipf) as zfile: plugin_id, version_id = \ server.plugin.upload(xmlrpc.client.Binary(zfile.read())) info('Plugin ID: %s', plugin_id) info('Version ID: %s', version_id) except xmlrpc.client.Fault as err: error('ERROR: fault error') error('Fault code: %d', err.faultCode) error('Fault string: %s', err.faultString) except xmlrpc.client.ProtocolError as err: error('Error: Protocol error') error("%s : %s", err.errcode, err.errmsg) if err.errcode == 403: error('Invalid name and password') @task def test_default_csw_connections(): """test that the default CSW connections work""" relpath = 'resources%sconnections-default.xml' % os.sep csw_connections_xml = options.base.plugin / relpath conns = etree.parse(csw_connections_xml) for conn in conns.findall('csw'): try: csw = CatalogueServiceWeb(conn.attrib.get('url')) # spellok info('Success: %s', csw.identification.title) csw.getrecords2() except Exception as err: raise ValueError('ERROR: %s', err) @task @cmdopts([ ('filename=', 'f', 'Path to file of CSW URLs'), ]) def generate_csw_connections_file(): """generate a CSW connections file from a flat file of CSW URLs""" filename = options.get('filename', False) if not filename: raise ValueError('path to file of CSW URLs required') conns = etree.Element('qgsCSWConnections') conns.attrib['version'] = '1.0' with open(filename) as connsfh: for line in connsfh: url = line.strip() if not url: # blank line continue try: csw = CatalogueServiceWeb(url) # spellok title = str(csw.identification.title) etree.SubElement(conns, 'csw', name=title, url=url) except Exception as err: error('ERROR on CSW %s: %s', url, err) with open('%s.xml' % filename, 'w') as connsxmlfh: connsxmlfh.write(etree.tostring(conns, encoding='utf-8')) def get_package_filename(): """return filepath of plugin zipfile""" filename = '%s-%s.zip' % (PLUGIN_NAME, options.base.version) package_file = '%s/%s' % (options.base.tmp, filename) return package_file
license: gpl-2.0
repo_name: trabucayre/periphondemand
path: periphondemand/toolchains/synthesis/vivado/vivado.py
copies: 2
size: 26838
content:
#! /usr/bin/python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Name: ise.py # Purpose: # Author: Gwenhael Goavec-Merou <[email protected]> # Created: 21/07/2015 # ----------------------------------------------------------------------------- # Copyright (2008) Armadeus Systems # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # ----------------------------------------------------------------------------- # Revision list : # # Date By Changes # # ----------------------------------------------------------------------------- """ Manage Vivado toolchain """ import os from periphondemand.bin.define import BINARYPROJECTPATH from periphondemand.bin.define import BINARY_PREFIX from periphondemand.bin.define import SYNTHESISPATH from periphondemand.bin.define import OBJSPATH from periphondemand.bin.define import VHDLEXT from periphondemand.bin.define import TCLEXT from periphondemand.bin.define import XILINX_BITSTREAM_SUFFIX from periphondemand.bin.define import XILINX_BINARY_SUFFIX from periphondemand.bin.define import COLOR_END from periphondemand.bin.define import COLOR_SHELL from periphondemand.bin.utils.settings import Settings from periphondemand.bin.utils.poderror import PodError from periphondemand.bin.utils.display import Display from periphondemand.bin.utils import wrappersystem as sy from periphondemand.bin.toolchain.synthesis import Synthesis SETTINGS = Settings() DISPLAY = Display() class Vivado(Synthesis): """ Manage specific synthesis part for vivado toolchain """ SYNTH_CMD = "vivado" name = "vivado" def __init__(self, parent): """ constructor """ Synthesis.__init__(self, parent) tool = self.synthesis_toolcommandname command = "-version" cont = [] cont = list(os.popen(tool + " " + command)) self.version = cont[0].split(" ")[1][1:] self.base_version = self.version.split(".")[0] @classmethod def constraints_file_extension(cls): return ("xdc") def need_block_design(self): """ Check if design need to generate a block design file """ list_bd_comp = [] for component in self.project.instances: bd_node = component.get_nodes("vivado") if not (len(bd_node) == 0): list_bd_comp.append(component) return list_bd_comp def generate_block_design(self, component): """ Generate the block design file for xilinx fpga """ out = "set design_name " + component.name + "_bd\n\n" out += "# CHECKING IF PROJECT EXISTS\n" out += 'if { [get_projects -quiet] eq "" } {\n' out += ' puts "ERROR: Please open or create a project!"\n' out += " return 1\n" out += "}\n\n\n" out += "# Creating design if needed\n\n" out += " # USE CASES:\n" out += " # 8) No opened design, design_name not in project.\n" out += " # 9) Current opened design, has components, but " + \ "diff names, design_name not in project.\n\n" out += ' puts "INFO: Currently there is no design ' + \ '<$design_name> in project, so creating one..."\n\n' out += 
" create_bd_design $design_name\n\n" out += ' puts "INFO: Making design <$design_name> as ' + \ 'current_bd_design."\n' out += " current_bd_design $design_name\n\n" out += 'puts "INFO: Currently the variable <design_name> ' + \ 'is equal to \\"$design_name\\"."\n\n' out += "\n\n\n" out += \ "# Procedure to create entire design; Provide " + \ "argument to make\n" + \ '# procedure reusable. If parentCell is "", will ' + \ 'use root.\n' + \ "proc create_root_design { parentCell } {\n\n" + \ ' if { $parentCell eq "" } {\n' + \ " set parentCell [get_bd_cells /]\n" + \ " }\n\n" + \ " # Get object for parentCell\n" + \ " set parentObj [get_bd_cells $parentCell]\n" + \ ' if { $parentObj == "" } {\n' + \ ' puts "ERROR: Unable to find parent cell ' + \ ' <$parentCell>!"\n' + \ " return\n" + \ " }\n\n" out += \ " # Make sure parentObj is hier blk\n" + \ " set parentType [get_property TYPE $parentObj]\n" + \ ' if { $parentType ne "hier" } {\n' + \ ' puts "ERROR: Parent <$parentObj> has TYPE = ' + \ ' <$parentType>. ' + \ 'Expected to be <hier>."\n' + \ " return\n" + \ " }\n\n" + \ " # Save current instance; Restore later\n" + \ " set oldCurInst [current_bd_instance .]\n\n" + \ " # Set parent object as current\n" + \ " current_bd_instance $parentObj\n" out += "\n\n" out += " # Create interface ports\n" vivado_node = component.get_nodes("vivado") for vivado_if in vivado_node[0].get_subnodes("vivado_interfaces", "vivado_interface"): if_params = vivado_if.get_nodes("parameter") out += " set " + vivado_if.get_attr_value("instance_name") + \ " [ create_bd_intf_port -mode " + \ vivado_if.get_attr_value("mode") + " " + \ vivado_if.get_attr_value("options") + " " + \ vivado_if.get_attr_value("name") + " " + \ vivado_if.get_attr_value("instance_name") + " ]\n" if if_params != []: out += " set_property -dict [ list " for param in if_params: out += " CONFIG." + param.get_attr_value("name") + \ " {" + param.text + "} " out += " ] $" + \ vivado_if.get_attr_value("instance_name") + "\n" out += "\n" out += " # Create ports\n" for vivado_if in vivado_node[0].get_subnodes("vivado_ports", "vivado_port"): if_params = vivado_if.get_nodes("parameter") out += " set " + vivado_if.get_attr_value("instance_name") + \ " [ create_bd_port -dir " + \ vivado_if.get_attr_value("direction") # if if_from != None: # out += " -from " + if_from) # if if_to != None: # out += " -to " + if_to) out += " -type " + vivado_if.get_attr_value("type") + \ " " + vivado_if.get_attr_value("instance_name") + " ]\n" if if_params != []: out += " set_property -dict [ list " for param in if_params: out += "CONFIG." + param.get_attr_value("name") + \ " {" + param.text + "} " out += " ] $" + vivado_if.get_attr_value("instance_name") + \ "\n" out += "\n" vivado_comps = vivado_node[0].get_subnodes("vivado_components", "vivado_component") for comp in vivado_comps: cp_params = comp.get_nodes("parameter") out += " # Create instance: " + \ comp.get_attr_value("instance_name") + \ ", and set properties\n" out += " set " + comp.get_attr_value("instance_name") + \ " [ create_bd_cell " + \ "-type " + comp.get_attr_value("type") + " " + \ comp.get_attr_value("options") + " " + \ comp.get_attr_value("name") + \ " " + comp.get_attr_value("instance_name") + " ]\n" if cp_params != []: out += " set_property -dict [ list " for param in cp_params: out += "CONFIG." 
+ param.get_attr_value("name") + \ " {" + param.text + "} \\\n" out += " ] $" + comp.get_attr_value("instance_name") + "\n" out += "\n" out += " # Create interface connections\n" vivado_conns = vivado_node[0].get_subnodes("ifs_connections", "connection") for conn in vivado_conns: out += " connect_bd_intf_net -intf_net " + \ conn.get_attr_value("src") for dest in conn.get_nodes("dest"): dest_type = dest.get_attr_value("type") out += " [get_bd_intf_" if dest_type == "port": out += "ports" else: out += "pins" out += " " + dest.get_attr_value("name") + "]" out += "\n" out += "\n" out += " # Create port connections\n" vivado_conns = vivado_node[0].get_subnodes("ports_connections", "connection") for conn in vivado_conns: out += " connect_bd_net -net " + conn.get_attr_value("src") for dest in conn.get_nodes("dest"): dest_type = dest.get_attr_value("type") out += " [get_bd_" if dest_type == "port": out += "ports" else: out += "pins" out += " " + dest.get_attr_value("name") + "]" out += "\n" out += "\n" out += " # Create address segments\n" + \ " create_bd_addr_seg -range 0x10000 -offset 0x43C00000 " + \ "[get_bd_addr_spaces processing_system7_0/Data] " + \ "[get_bd_addr_segs M00_AXI/Reg] SEG_M00_AXI_Reg\n \n\n" out += " # Restore current instance\n" + \ " current_bd_instance $oldCurInst\n\n" + \ " save_bd_design\n" out += "}\n" out += "# End of create_root_design()\n\n\n" out += "#####################################################\n" out += "# MAIN FLOW\n" out += "#####################################################\n" out += "\n" out += 'create_root_design ""\n\n\n' tclfile = open(self.project.projectpath + SYNTHESISPATH + "/" + component.name + "_bd.tcl", "w") tclfile.write(out) def add_constraints_file(self, filename): """ return line for constraints file insertion """ out = "# Set 'constrs_1' fileset object\n" out += "set obj [get_filesets constrs_1]\n" out += "\n" out += "# Add/Import constrs file and set constrs file properties\n" out += 'set file "[file normalize "..' + SYNTHESISPATH + "/" + \ self.project.name + '.xdc"]"\n' out += "set file_added [add_files -norecurse -fileset $obj $file]\n" out += 'set file "..' + SYNTHESISPATH + "/" + \ self.project.name + '.xdc"\n' out += "set file [file normalize $file]\n" out += 'set file_obj [get_files -of_objects ' + \ '[get_filesets constrs_1] [list "*$file"]]\n' out += 'set_property "file_type" "XDC" $file_obj\n' out += "\n" out += "\n" return out def generatelibraryconstraints(self): # TODO """ Adds constraints specified by a component, such as placement for a PLL, multiplier, etc. 
or clock informations about PLL output signals """ out = "# components constraints \n" for instance in self.project.instances: if instance.constraints != []: for constraint in instance.constraints: inst_name = instance.instancename attr_name = str(constraint.get_attr_value("name")) constr_type = constraint.get_attr_value("type") sig_type = constraint.get_attr_value("sig_type") if sig_type is None: sig_type = "ports" if constr_type == "clk": frequency = constraint.get_attr_value("frequency") freq = " %g" % ((1000 / float(frequency))) out += "create_clock -period " + freq + \ " -name " + inst_name + "_" + attr_name + \ " [get_" + sig_type + " " + inst_name if sig_type == "ports": out += "_" else: out += "/" out += attr_name + "]\n" elif constr_type == "placement": out += 'INST "' + inst_name + "/" + \ attr_name + '" LOC=' + \ constraint.get_attr_value("loc") + ";\n" elif constr_type == "false_path": # GGM : add verification : this attributes are # mandatory for false_path src_type = constraint.get_attr_value("src_type") dest_type = constraint.get_attr_value("dest_type") out += "set_false_path -from [get_" if src_type == "clocks" or src_type == "inst_clocks": out += "clocks " else: out += src_type if src_type == "inst_clocks": out += inst_name + "_" elif src_type == "pins": out += inst_name + "/" out += constraint.get_attr_value("src") + \ "] -to [get_" if dest_type == "clocks" or dest_type == "inst_clocks": out += "clocks " else: out += src_type if dest_type == "inst_clocks": out += inst_name + "_" elif dest_type == "pins": out += inst_name + "/" out += constraint.get_attr_value("dest") + "]\n" elif constr_type == "input_delay": out += "set_input_delay -clock " + inst_name + "_" + \ constraint.get_attr_value("src") + " " + \ constraint.get_attr_value("value") + " " + \ "[get_" + sig_type + " " + inst_name if sig_type == "ports": out += "_" else: out += "/" out += constraint.get_attr_value("dest") + "]\n" else: raise PodError("component " + instance.name + " has an unknown type " + constr_type, 0) return out @classmethod def addforcepinout(cls, port): """ Generate line for pin """ constr = port.get_attr_value("constr_hidden") if constr is not None and constr == "1": return "" out = 'NET "force_' + str(port.name) out += '" LOC="' + str(port.position) + \ '" | IOSTANDARD=' + str(port.standard) if port.getDrive() is not None: out += " | DRIVE=" + str(port.drive) out += r'; # ' + str(port.name) + '\n' return out @classmethod def addclockconstraints(cls, connect, frequency): """ Generate clock constraints """ out = "NET \"" + connect["instance_dest"] + \ "_" + connect["port_dest"] + '" TNM_NET = "' + \ connect["instance_dest"] + "_" + connect["port_dest"] + \ "\";\n" out += "TIMESPEC \"TS_" + connect["instance_dest"] + \ "_" + connect["port_dest"] + '" = PERIOD "' + \ connect["instance_dest"] + "_" + connect["port_dest"] + \ "\" " + "%g" % ((1000 / float(frequency))) + \ " ns HIGH 50 %;\n" return out def addpinconstraints(self, connect, port): """ Generate constraints for a pin """ constr = port.get_attr_value("constr_hidden") if constr is not None and constr == "1": return "" instancedest =\ self.project.get_instance(connect["instance_dest"]) interfacedest = \ instancedest.get_interface(connect["interface_dest"]) portdest = interfacedest.get_port(connect["port_dest"]) get_ports = "[get_ports " if portdest.size != 1: get_ports += '{' get_ports += connect["instance_dest"] + \ "_" + connect["port_dest"] if portdest.size != 1: if portdest.is_fully_connected(): get_ports += "[" + connect["pin_dest"] + 
"]" else: get_ports += "_pin" + connect["pin_dest"] get_ports += '}' get_ports += ']' out = 'set_property PACKAGE_PIN ' + str(port.position) out += " " + get_ports + "\n" # TODO # if portdest.getPortOption() != None: # out = out + ' | '+str(portdest.getPortOption()) # elif port.getPortOption() != None: # out = out + ' | '+str(port.getPortOption()) out += 'set_property IOSTANDARD ' if portdest.standard is not None: out += str(portdest.standard) + " " else: out += str(port.standard) out += " " + get_ports + "\n" # if portdest.getDrive() != None: # out = out + " | DRIVE="+str(portdest.getDrive()) # elif port.getDrive() != None: # out = out + " | DRIVE="+str(port.getDrive()) # out = out+r'; # '+str(port.name)+'\n' return out def project_base_creation(self): """ return string for project creation """ platform = self.project.platform proj_name = self.project.name out = "# Set the reference directory for source file relative " + \ "paths (by default the value is script directory path)\n" out += 'set origin_dir "..' + OBJSPATH + '/"' out += "\n" out += "\n" out += "# Create project\n" out += "create_project -part " + platform.device + \ " " + self.project.name + "\n" out += "\n" out += "# Set the directory path for the new project\n" out += "set proj_dir [get_property directory [current_project]]\n" out += "\n" out += "# Set project properties\n" out += "set obj [get_projects " + proj_name + "]\n" if platform.board_part is not None: out += 'set_property "board_part" "' + \ platform.board_part + '" $obj\n' out += 'set_property "default_lib" "xil_defaultlib" $obj\n' out += 'set_property "simulator_language" "Mixed" $obj\n' out += 'set_property "target_language" "VHDL" $obj\n' out += "\n" return out def project_base_configuration(self): """ return basic project configuration """ out = "# Create 'sources_1' fileset (if not found)\n" out += "if {[string equal [get_filesets -quiet sources_1] \"\"]} {\n" out += " create_fileset -srcset sources_1\n" out += "}\n" out += "\n" out += "# Set 'sources_1' fileset object\n" out += "set obj [get_filesets sources_1]\n" out += "set files [list \\\n" out += ' "[file normalize "..' 
+ SYNTHESISPATH + "/top_" + \ self.project.name + VHDLEXT + '"]"\\\n' out += "]\n" out += "add_files -norecurse -fileset $obj $files\n" out += "\n" out += "# Set 'sources_1' fileset file properties for remote files\n" out += "\n" out += "# Set 'sources_1' fileset file properties for local files\n" out += "# None\n" out += "\n" out += "# Set 'sources_1' fileset properties\n" out += "set obj [get_filesets sources_1]\n" out += 'set_property "top" "top_' + self.project.name + '" $obj\n' out += "\n" out += "# Create 'constrs_1' fileset (if not found)\n" out += "if {[string equal [get_filesets -quiet constrs_1] \"\"]} {\n" out += " create_fileset -constrset constrs_1\n" out += "}\n" out += "\n" return out @classmethod def add_file_to_tcl(cls, filename): out = "set obj [get_filesets sources_1]\n" out += 'set file "[file normalize "' + filename + '"]"\n' out += "set file_added [add_files -norecurse -fileset $obj $file]\n" return out def insert_tools_specific_commands(self): """ return lines for misc stuff specific to a tool """ platform = self.project.platform proj_name = self.project.name out = "# Create 'sim_1' fileset (if not found)\n" out += 'if {[string equal [get_filesets -quiet sim_1] ""]} {\n' out += " create_fileset -simset sim_1\n" out += "}\n\n" out += "# Set 'sim_1' fileset object\n" out += "set obj [get_filesets sim_1]\n" out += "# Empty (no sources present)\n\n" out += "# Set 'sim_1' fileset properties\n" out += "set obj [get_filesets sim_1]\n" out += 'set_property "top" "top_' + self.project.name + '" $obj\n' out += "\n" out += "# Create 'synth_1' run (if not found)\n" out += "if {[string equal [get_runs -quiet synth_1] \"\"]} {\n" out += " create_run -name synth_1 -part " + platform.device + \ ' -flow {Vivado Synthesis ' + self.base_version + '} ' + \ '-strategy "Vivado Synthesis Defaults" -constrset constrs_1\n' out += "} else {\n" out += ' set_property strategy "Vivado Synthesis Defaults" ' + \ "[get_runs synth_1]\n" out += ' set_property flow "Vivado Synthesis ' + \ self.base_version + '" [get_runs synth_1]\n' out += "}\n" out += "set obj [get_runs synth_1]\n\n" out += "# set the current synth run\n" out += "current_run -synthesis [get_runs synth_1]\n\n" out += "# Create 'impl_1' run (if not found)\n" out += "if {[string equal [get_runs -quiet impl_1] \"\"]} {\n" out += " create_run -name impl_1 -part " + platform.device + \ " -flow {Vivado Implementation " + self.base_version + "} " + \ '-strategy "Vivado Implementation Defaults" ' + \ '-constrset constrs_1 -parent_run synth_1\n' out += "} else {\n" out += ' set_property strategy "Vivado Implementation Defaults" ' + \ "[get_runs impl_1]\n" out += ' set_property flow "Vivado Implementation ' + \ self.base_version + '" [get_runs impl_1]\n' out += "}\n" out += "set obj [get_runs impl_1]\n\n" out += 'set_property "needs_refresh" "1" $obj\n' out += 'set_property "steps.write_bitstream.args.bin_file" "1" $obj\n' list_bd_comp = self.need_block_design() if len(list_bd_comp): out += "load_features ipintegrator\n" for component in list_bd_comp: bd_name = component.name + "_bd" + TCLEXT self.generate_block_design(component) out += "source .." 
+ SYNTHESISPATH + "/" + bd_name + "\n" out += "\n\n" out += "# set the current impl run\n" out += "current_run -implementation [get_runs impl_1]\n\n" out += 'puts "INFO: Project created: ' + proj_name + '"\n' out += "# set the current impl run\n" out += "current_run -implementation [get_runs impl_1]\n" if len(list_bd_comp): for component in list_bd_comp: out += "generate_target all [get_files " + \ "./" + proj_name + ".srcs/sources_1/bd/" + \ component.name + "_bd/" + component.name + "_bd.bd]\n" return out @classmethod def insert_tools_gen_cmds(cls): """ return lines for bitstream generation """ out = "launch_runs synth_1\n" out += "wait_on_run synth_1\n" out += "## do implementation\n" out += "launch_runs impl_1\n" out += "wait_on_run impl_1\n" out += "## make bit file\n" out += "launch_runs impl_1 -to_step write_bitstream\n" out += "wait_on_run impl_1\n" out += "exit\n" return out @property def ext_files(self): """ return list of bitstream files extension """ return [XILINX_BITSTREAM_SUFFIX, XILINX_BINARY_SUFFIX] def generate_bitstream(self): """ generate the bitstream """ commandname = self.synthesis_toolcommandname scriptpath = os.path.join(self.parent.projectpath + SYNTHESISPATH, self.tcl_scriptname) pwd = sy.pwd() sy.del_all(self.project.projectpath + OBJSPATH) sy.chdir(self.project.projectpath + SYNTHESISPATH) commandname += " -mode tcl" scriptname = "-source " + scriptpath + " -tclargs build" binpath = self.project.projectpath + OBJSPATH + "/" + \ self.project.name + ".runs/impl_1/" for line in sy.launch_as_shell(commandname, scriptname): if SETTINGS.color() == 1: print(COLOR_SHELL + line + COLOR_END), else: print("SHELL>" + line), for ext_file in self.ext_files: try: sy.cp_file(binpath + BINARY_PREFIX + self.project.name + ext_file, self.project.projectpath + BINARYPROJECTPATH + "/") except IOError: raise PodError("Can't copy bitstream") sy.chdir(pwd)
lgpl-2.1
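A note on the clock constraints generated above: generatelibraryconstraints() turns a frequency attribute into a create_clock period via 1000 / frequency. A minimal standalone sketch of that conversion follows, assuming the frequency is expressed in MHz (the source only performs the division and does not state the unit).

# Sketch of the period computation used in generatelibraryconstraints();
# the MHz unit is an assumption, the source only computes 1000 / frequency.
def clock_period_ns(frequency_mhz):
    """Return the -period argument for a Vivado create_clock line."""
    return "%g" % (1000 / float(frequency_mhz))

print(clock_period_ns(100))  # 10
print(clock_period_ns(50))   # 20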
omaciel/pylyglot
translations/views.py
1
2607
# -*- encoding: utf-8 -*- # vim: ts=4 sw=4 expandtab ai # # This file is part of Pylyglot. # # Pylyglot is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pylyglot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Pylyglot. If not, see <http://www.gnu.org/licenses/>. from django.shortcuts import render_to_response from django.template import RequestContext from django.utils.http import urlencode from core.forms import SearchForm from core.models import Translation from django.db.models import Count from django.views.generic import ListView from django.views.generic import ListView class SearchableTranslationListView(ListView): model = Translation paginate_by = 20 template_name = 'translations/translation_list.html' def get_queryset(self): queryset = super(SearchableTranslationListView, self).get_queryset() self.query = self.request.GET.get('query', '') self.short_name = self.request.GET.get('languages', '') if self.query and self.short_name: return queryset.filter( sentence__msgid__icontains=self.query, language__short_name=self.short_name, obsolete=False, ).values( 'sentence__msgid', 'msgstr', 'sentence__length', 'package__name', ).order_by( 'sentence__length', 'sentence__msgid', 'msgstr' ).distinct() else: return queryset.none() def get_context_data(self, **kwargs): kwargs.update({ 'query': self.query, 'short_name': self.short_name, 'form': SearchForm(self.request.GET or None), 'is_searching': ('query' in self.request.GET and 'languages' in self.request.GET), 'pagination_extra': urlencode({ 'languages': self.short_name, 'query': self.query, }), }) return super(SearchableTranslationListView, self).get_context_data(**kwargs)
gpl-3.0
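The SearchableTranslationListView above reads query and languages from request.GET; a hypothetical URLconf entry wiring it up is sketched below (the URL pattern and name are illustrative assumptions, not taken from the pylyglot repository).

# Hypothetical urls.py entry for the list view above (pattern and name are
# assumptions made for illustration).
from django.conf.urls import url

from translations.views import SearchableTranslationListView

urlpatterns = [
    url(r'^translations/$',
        SearchableTranslationListView.as_view(),
        name='translation_list'),
]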
jim-thisplace/exercises-in-programming-style
33-restful/tf-33.py
17
3653
#!/usr/bin/env python import re, string, sys with open("../stop_words.txt") as f: stops = set(f.read().split(",")+list(string.ascii_lowercase)) # The "database" data = {} # Internal functions of the "server"-side application def error_state(): return "Something wrong", ["get", "default", None] # The "server"-side application handlers def default_get_handler(args): rep = "What would you like to do?" rep += "\n1 - Quit" + "\n2 - Upload file" links = {"1" : ["post", "execution", None], "2" : ["get", "file_form", None]} return rep, links def quit_handler(args): sys.exit("Goodbye cruel world...") def upload_get_handler(args): return "Name of file to upload?", ["post", "file"] def upload_post_handler(args): def create_data(filename): if filename in data: return word_freqs = {} with open(filename) as f: for w in [x.lower() for x in re.split("[^a-zA-Z]+", f.read()) if len(x) > 0 and x.lower() not in stops]: word_freqs[w] = word_freqs.get(w, 0) + 1 word_freqsl = word_freqs.items() word_freqsl.sort(lambda x, y: cmp(y[1], x[1])) data[filename] = word_freqsl if args == None: return error_state() filename = args[0] try: create_data(filename) except: return error_state() return word_get_handler([filename, 0]) def word_get_handler(args): def get_word(filename, word_index): if word_index < len(data[filename]): return data[filename][word_index] else: return ("no more words", 0) filename = args[0]; word_index = args[1] word_info = get_word(filename, word_index) rep = '\n#{0}: {1} - {2}'.format(word_index+1, word_info[0], word_info[1]) rep += "\n\nWhat would you like to do next?" rep += "\n1 - Quit" + "\n2 - Upload file" rep += "\n3 - See next most-frequently occurring word" links = {"1" : ["post", "execution", None], "2" : ["get", "file_form", None], "3" : ["get", "word", [filename, word_index+1]]} return rep, links # Handler registration handlers = {"post_execution" : quit_handler, "get_default" : default_get_handler, "get_file_form" : upload_get_handler, "post_file" : upload_post_handler, "get_word" : word_get_handler } # The "server" core def handle_request(verb, uri, args): def handler_key(verb, uri): return verb + "_" + uri if handler_key(verb, uri) in handlers: return handlers[handler_key(verb, uri)](args) else: return handlers[handler_key("get", "default")](args) # A very simple client "browser" def render_and_get_input(state_representation, links): print state_representation sys.stdout.flush() if type(links) is dict: # many possible next states input = sys.stdin.readline().strip() if input in links: return links[input] else: return ["get", "default", None] elif type(links) is list: # only one possible next state if links[0] == "post": # get "form" data input = sys.stdin.readline().strip() links.append([input]) # add the data at the end return links else: # get action, don't get user input return links else: return ["get", "default", None] request = ["get", "default", None] while True: # "server"-side computation state_representation, links = handle_request(*request) # "client"-side computation request = render_and_get_input(state_representation, links)
mit
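The exercise above registers every handler in a dict keyed by "<verb>_<uri>" and falls back to the default GET handler when a key is missing; a stripped-down sketch of that dispatch pattern, with dummy handlers rather than the exercise's real ones:

# Standalone illustration of the verb_uri handler registry used above.
handlers = {
    "get_default": lambda args: ("menu", None),
    "get_word": lambda args: ("word #%d" % args[1], None),
}

def handle_request(verb, uri, args):
    handler = handlers.get(verb + "_" + uri, handlers["get_default"])
    return handler(args)

print(handle_request("get", "word", ["pride-and-prejudice.txt", 3]))
print(handle_request("post", "unknown", None))  # falls back to the default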
ivanmarcin/kubernetes
cluster/saltbase/salt/_states/container_bridge.py
96
5593
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import salt.exceptions import salt.utils.ipaddr as ipaddr def ensure(name, cidr, mtu=1460): ''' Ensure that a bridge (named <name>) is configured for contianers. Under the covers we will make sure that - The bridge exists - The MTU is set - The correct network is added to the bridge - iptables is set up for MASQUARADE for egress cidr: The cidr range in the form of 10.244.x.0/24 mtu: The MTU to set on the interface ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} iptables_rule = { 'table': 'nat', 'chain': 'POSTROUTING', 'rule': '-o eth0 -j MASQUERADE \! -d 10.0.0.0/8' } def bridge_exists(name): 'Determine if a bridge exists already.' out = __salt__['cmd.run_stdout']('brctl show {0}'.format(name)) for line in out.splitlines(): # get rid of first line if line.startswith('bridge name'): continue # get rid of ^\n's vals = line.split() if not vals: continue if len(vals) > 1: return True return False def get_ip_addr_details(name): 'For the given interface, get address details.' out = __salt__['cmd.run']('ip addr show dev {0}'.format(name)) ret = { 'networks': [] } for line in out.splitlines(): match = re.match( r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>.*mtu (\d+)', line) if match: iface, parent, attrs, mtu = match.groups() if 'UP' in attrs.split(','): ret['up'] = True else: ret['up'] = False if parent: ret['parent'] = parent ret['mtu'] = int(mtu) continue cols = line.split() if len(cols) > 2 and cols[0] == 'inet': ret['networks'].append(cols[1]) return ret def get_current_state(): 'Helper that returns a dict of current bridge state.' ret = {} ret['name'] = name ret['exists'] = bridge_exists(name) if ret['exists']: ret['details'] = get_ip_addr_details(name) else: ret['details'] = {} # This module function is strange and returns True if the rule exists. # If not, it returns a string with the error from the call to iptables. ret['iptables_rule_exists'] = \ __salt__['iptables.check'](**iptables_rule) == True return ret # This is a little hacky. I should probably import a real library for this # but this'll work for now. try: cidr_network = ipaddr.IPv4Network(cidr, strict=True) except Exception: raise salt.exceptions.SaltInvocationError( 'Invalid CIDR \'{0}\''.format(cidr)) desired_network = '{0}/{1}'.format( str(ipaddr.IPv4Address(cidr_network._ip + 1)), str(cidr_network.prefixlen)) current_state = get_current_state() if (current_state['exists'] and current_state['details']['mtu'] == mtu and desired_network in current_state['details']['networks'] and current_state['details']['up'] and current_state['iptables_rule_exists']): ret['result'] = True ret['comment'] = 'System already in the correct state' return ret # The state of the system does need to be changed. Check if we're running # in ``test=true`` mode. 
if __opts__['test'] == True: ret['comment'] = 'The state of "{0}" will be changed.'.format(name) ret['changes'] = { 'old': current_state, 'new': 'Create and configure bridge' } # Return ``None`` when running with ``test=true``. ret['result'] = None return ret # Finally, make the actual change and return the result. if not current_state['exists']: __salt__['cmd.run']('brctl addbr {0}'.format(name)) new_state = get_current_state() if new_state['details']['mtu'] != mtu: __salt__['cmd.run']( 'ip link set dev {0} mtu {1}'.format(name, str(mtu))) new_state = get_current_state() if desired_network not in new_state['details']['networks']: __salt__['cmd.run']( 'ip addr add {0} dev {1}'.format(desired_network, name)) new_state = get_current_state() if not new_state['details']['up']: __salt__['cmd.run']( 'ip link set dev {0} up'.format(name)) new_state = get_current_state() if not new_state['iptables_rule_exists']: __salt__['iptables.append'](**iptables_rule) new_state = get_current_state() ret['comment'] = 'The state of "{0}" was changed!'.format(name) ret['changes'] = { 'old': current_state, 'new': new_state, } ret['result'] = True return ret
apache-2.0
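The ensure() state above computes the bridge address as the CIDR's network base plus one; the same calculation is sketched below with the standard-library ipaddress module (an assumption for portability, since the state itself uses the bundled salt.utils.ipaddr helper).

# Standalone sketch of the desired_network calculation from ensure() above,
# written against Python 3's ipaddress instead of salt.utils.ipaddr.
import ipaddress

def desired_network(cidr):
    net = ipaddress.IPv4Network(cidr, strict=True)
    return "{0}/{1}".format(net.network_address + 1, net.prefixlen)

print(desired_network("10.244.1.0/24"))  # 10.244.1.1/24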
morreene/tradenews
venv/Lib/site-packages/flask/testsuite/reqctx.py
557
5960
# -*- coding: utf-8 -*- """ flask.testsuite.reqctx ~~~~~~~~~~~~~~~~~~~~~~ Tests the request context. :copyright: (c) 2012 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import flask import unittest try: from greenlet import greenlet except ImportError: greenlet = None from flask.testsuite import FlaskTestCase class RequestContextTestCase(FlaskTestCase): def test_teardown_on_pop(self): buffer = [] app = flask.Flask(__name__) @app.teardown_request def end_of_request(exception): buffer.append(exception) ctx = app.test_request_context() ctx.push() self.assert_equal(buffer, []) ctx.pop() self.assert_equal(buffer, [None]) def test_proper_test_request_context(self): app = flask.Flask(__name__) app.config.update( SERVER_NAME='localhost.localdomain:5000' ) @app.route('/') def index(): return None @app.route('/', subdomain='foo') def sub(): return None with app.test_request_context('/'): self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/') with app.test_request_context('/'): self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/') try: with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}): pass except Exception as e: self.assert_true(isinstance(e, ValueError)) self.assert_equal(str(e), "the server name provided " + "('localhost.localdomain:5000') does not match the " + \ "server name from the WSGI environment ('localhost')") try: app.config.update(SERVER_NAME='localhost') with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}): pass except ValueError as e: raise ValueError( "No ValueError exception should have been raised \"%s\"" % e ) try: app.config.update(SERVER_NAME='localhost:80') with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}): pass except ValueError as e: raise ValueError( "No ValueError exception should have been raised \"%s\"" % e ) def test_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): return 'Hello %s!' % flask.request.args['name'] @app.route('/meh') def meh(): return flask.request.url with app.test_request_context('/?name=World'): self.assert_equal(index(), 'Hello World!') with app.test_request_context('/meh'): self.assert_equal(meh(), 'http://localhost/meh') self.assert_true(flask._request_ctx_stack.top is None) def test_context_test(self): app = flask.Flask(__name__) self.assert_false(flask.request) self.assert_false(flask.has_request_context()) ctx = app.test_request_context() ctx.push() try: self.assert_true(flask.request) self.assert_true(flask.has_request_context()) finally: ctx.pop() def test_manual_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): return 'Hello %s!' % flask.request.args['name'] ctx = app.test_request_context('/?name=World') ctx.push() self.assert_equal(index(), 'Hello World!') ctx.pop() try: index() except RuntimeError: pass else: self.assert_true(0, 'expected runtime error') def test_greenlet_context_copying(self): app = flask.Flask(__name__) greenlets = [] @app.route('/') def index(): reqctx = flask._request_ctx_stack.top.copy() def g(): self.assert_false(flask.request) self.assert_false(flask.current_app) with reqctx: self.assert_true(flask.request) self.assert_equal(flask.current_app, app) self.assert_equal(flask.request.path, '/') self.assert_equal(flask.request.args['foo'], 'bar') self.assert_false(flask.request) return 42 greenlets.append(greenlet(g)) return 'Hello World!' 
rv = app.test_client().get('/?foo=bar') self.assert_equal(rv.data, b'Hello World!') result = greenlets[0].run() self.assert_equal(result, 42) def test_greenlet_context_copying_api(self): app = flask.Flask(__name__) greenlets = [] @app.route('/') def index(): reqctx = flask._request_ctx_stack.top.copy() @flask.copy_current_request_context def g(): self.assert_true(flask.request) self.assert_equal(flask.current_app, app) self.assert_equal(flask.request.path, '/') self.assert_equal(flask.request.args['foo'], 'bar') return 42 greenlets.append(greenlet(g)) return 'Hello World!' rv = app.test_client().get('/?foo=bar') self.assert_equal(rv.data, b'Hello World!') result = greenlets[0].run() self.assert_equal(result, 42) # Disable test if we don't have greenlets available if greenlet is None: test_greenlet_context_copying = None test_greenlet_context_copying_api = None def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(RequestContextTestCase)) return suite
bsd-3-clause
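The test case above exercises pushing, copying and popping request contexts; a minimal standalone use of the same API, outside the test suite, assuming Flask is installed:

# Minimal sketch of the request-context behaviour covered by the tests above.
import flask

app = flask.Flask(__name__)

with app.test_request_context('/?name=World'):
    # Inside the block a fake request is bound to flask.request.
    assert flask.has_request_context()
    assert flask.request.args['name'] == 'World'

# Once the block exits, the context is popped again.
assert not flask.has_request_context()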
lyoshenka/PyPagekite
pagekite/ui/basic.py
2
9580
""" This is the "basic" text-mode user interface class. """ ############################################################################# LICENSE = """\ This file is part of pagekite.py. Copyright 2010-2013, the Beanstalks Project ehf. and Bjarni Runar Einarsson This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see: <http://www.gnu.org/licenses/> """ ############################################################################# import re import sys import time from nullui import NullUi from pagekite.common import * HTML_BR_RE = re.compile(r'<(br|/p|/li|/tr|/h\d)>\s*') HTML_LI_RE = re.compile(r'<li>\s*') HTML_NBSP_RE = re.compile(r'&nbsp;') HTML_TAGS_RE = re.compile(r'<[^>\s][^>]*>') def clean_html(text): return HTML_LI_RE.sub(' * ', HTML_NBSP_RE.sub('_', HTML_BR_RE.sub('\n', text))) def Q(text): return HTML_TAGS_RE.sub('', clean_html(text)) class BasicUi(NullUi): """Stdio based user interface.""" DAEMON_FRIENDLY = False WANTS_STDERR = True EMAIL_RE = re.compile(r'^[a-z0-9!#$%&\'\*\+\/=?^_`{|}~-]+' '(?:\.[a-z0-9!#$%&\'*+/=?^_`{|}~-]+)*@' '(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)*' '(?:[a-zA-Z]{2,4}|museum)$') def Notify(self, message, prefix=' ', popup=False, color=None, now=None, alignright=''): now = int(now or time.time()) color = color or self.NORM # We suppress duplicates that are either new or still on the screen. keys = self.notify_history.keys() if len(keys) > 20: for key in keys: if self.notify_history[key] < now-300: del self.notify_history[key] message = '%s' % message if message not in self.notify_history: # Display the time now and then. 
if (not alignright and (now >= (self.last_tick + 60)) and (len(message) < 68)): try: self.last_tick = now d = datetime.datetime.fromtimestamp(now) alignright = '[%2.2d:%2.2d]' % (d.hour, d.minute) except: pass # Fails on Python 2.2 if not now or now > 0: self.notify_history[message] = now msg = '\r%s %s%s%s%s%s\n' % ((prefix * 3)[0:3], color, message, self.NORM, ' ' * (75-len(message)-len(alignright)), alignright) self.wfile.write(msg) self.Status(self.status_tag, self.status_msg) def NotifyMOTD(self, frontend, motd_message): lc = 1 self.Notify(' ') for line in Q(motd_message).splitlines(): self.Notify((line.strip() or ' ' * (lc+2)), prefix=' ++', color=self.WHITE) lc += 1 self.Notify(' ' * (lc+2), alignright='[MOTD from %s]' % frontend) self.Notify(' ') def Status(self, tag, message=None, color=None): self.status_tag = tag self.status_col = color or self.status_col or self.NORM self.status_msg = '%s' % (message or self.status_msg) if not self.in_wizard: message = self.status_msg msg = ('\r << pagekite.py [%s]%s %s%s%s\r%s' ) % (tag, ' ' * (8-len(tag)), self.status_col, message[:52], ' ' * (52-len(message)), self.NORM) self.wfile.write(msg) if tag == 'exiting': self.wfile.write('\n') def Welcome(self, pre=None): if self.in_wizard: self.wfile.write('%s%s%s' % (self.CLEAR, self.WHITE, self.in_wizard)) if self.welcome: self.wfile.write('%s\r%s\n' % (self.NORM, Q(self.welcome))) self.welcome = None if self.in_wizard and self.wizard_tell: self.wfile.write('\n%s\r' % self.NORM) for line in self.wizard_tell: self.wfile.write('*** %s\n' % Q(line)) self.wizard_tell = None if pre: self.wfile.write('\n%s\r' % self.NORM) for line in pre: self.wfile.write(' %s\n' % Q(line)) self.wfile.write('\n%s\r' % self.NORM) def StartWizard(self, title): self.Welcome() banner = '>>> %s' % title banner = ('%s%s[CTRL+C = Cancel]\n') % (banner, ' ' * (62-len(banner))) self.in_wizard = banner self.tries = 200 def Retry(self): self.tries -= 1 return self.tries def EndWizard(self, quietly=False): if self.wizard_tell: self.Welcome() self.in_wizard = None if sys.platform in ('win32', 'os2', 'os2emx') and not quietly: self.wfile.write('\n<<< press ENTER to continue >>>\n') self.rfile.readline() def Spacer(self): self.wfile.write('\n') def Readline(self): line = self.rfile.readline() if line: return line.strip() else: raise IOError('EOF') def AskEmail(self, question, default=None, pre=[], wizard_hint=False, image=None, back=None, welcome=True): if welcome: self.Welcome(pre) while self.Retry(): self.wfile.write(' => %s ' % (Q(question), )) answer = self.Readline() if default and answer == '': return default if self.EMAIL_RE.match(answer.lower()): return answer if back is not None and answer == 'back': return back raise Exception('Too many tries') def AskLogin(self, question, default=None, email=None, pre=None, wizard_hint=False, image=None, back=None): self.Welcome(pre) def_email, def_pass = default or (email, None) self.wfile.write(' %s\n' % (Q(question), )) if not email: email = self.AskEmail('Your e-mail:', default=def_email, back=back, welcome=False) if email == back: return back import getpass self.wfile.write(' => ') return (email, getpass.getpass() or def_pass) def AskYesNo(self, question, default=None, pre=[], yes='yes', no='no', wizard_hint=False, image=None, back=None): self.Welcome(pre) yn = ((default is True) and '[Y/n]' ) or ((default is False) and '[y/N]' ) or ('[y/n]') while self.Retry(): self.wfile.write(' => %s %s ' % (Q(question), yn)) answer = self.Readline().lower() if default is not None and answer == '': 
answer = default and 'y' or 'n' if back is not None and answer.startswith('b'): return back if answer in ('y', 'n'): return (answer == 'y') raise Exception('Too many tries') def AskQuestion(self, question, pre=[], default=None, prompt=' =>', wizard_hint=False, image=None, back=None): self.Welcome(pre) self.wfile.write('%s %s ' % (prompt, Q(question))) return self.Readline() def AskKiteName(self, domains, question, pre=[], default=None, wizard_hint=False, image=None, back=None): self.Welcome(pre) if len(domains) == 1: self.wfile.write(('\n (Note: the ending %s will be added for you.)' ) % domains[0]) else: self.wfile.write('\n Please use one of the following domains:\n') for domain in domains: self.wfile.write('\n *%s' % domain) self.wfile.write('\n') while self.Retry(): self.wfile.write('\n => %s ' % Q(question)) answer = self.Readline().lower() if back is not None and answer == 'back': return back elif len(domains) == 1: answer = answer.replace(domains[0], '') if answer and SERVICE_SUBDOMAIN_RE.match(answer): return answer+domains[0] else: for domain in domains: if answer.endswith(domain): answer = answer.replace(domain, '') if answer and SERVICE_SUBDOMAIN_RE.match(answer): return answer+domain self.wfile.write(' (Please only use characters A-Z, 0-9, - and _.)') raise Exception('Too many tries') def AskMultipleChoice(self, choices, question, pre=[], default=None, wizard_hint=False, image=None, back=None): self.Welcome(pre) for i in range(0, len(choices)): self.wfile.write((' %s %d) %s\n' ) % ((default==i+1) and '*' or ' ', i+1, choices[i])) self.wfile.write('\n') while self.Retry(): d = default and (', default=%d' % default) or '' self.wfile.write(' => %s [1-%d%s] ' % (Q(question), len(choices), d)) try: answer = self.Readline().strip() if back is not None and answer.startswith('b'): return back choice = int(answer or default) if choice > 0 and choice <= len(choices): return choice except (ValueError, IndexError): pass raise Exception('Too many tries') def Tell(self, lines, error=False, back=None): if self.in_wizard: self.wizard_tell = lines else: self.Welcome() for line in lines: self.wfile.write(' %s\n' % line) if error: self.wfile.write('\n') return True def Working(self, message): if self.in_wizard: pending_messages = self.wizard_tell or [] self.wizard_tell = pending_messages + [message+' ...'] self.Welcome() self.wizard_tell = pending_messages + [message+' ... done.'] else: self.Tell([message]) return True
agpl-3.0
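The clean_html()/Q() helpers above reduce small HTML fragments to console-friendly text; a quick standalone run of the same regexes (the sample string is invented):

# Quick check of the HTML-stripping helpers defined in basic.py above.
import re

HTML_BR_RE = re.compile(r'<(br|/p|/li|/tr|/h\d)>\s*')
HTML_LI_RE = re.compile(r'<li>\s*')
HTML_NBSP_RE = re.compile(r'&nbsp;')
HTML_TAGS_RE = re.compile(r'<[^>\s][^>]*>')

def clean_html(text):
    return HTML_LI_RE.sub(' * ', HTML_NBSP_RE.sub('_', HTML_BR_RE.sub('\n', text)))

def Q(text):
    return HTML_TAGS_RE.sub('', clean_html(text))

print(Q('<p>Sign up<br><li>free&nbsp;plan</li></p>'))
# prints "Sign up" and " * free_plan" on separate lines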
jhawkesworth/ansible
lib/ansible/modules/network/dellos10/dellos10_command.py
39
7311
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2015, Peter Sprygada <[email protected]> # Copyright: (c) 2017, Dell Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: dellos10_command version_added: "2.2" author: "Senthil Kumar Ganesan (@skg-net)" short_description: Run commands on remote devices running Dell OS10 description: - Sends arbitrary commands to a Dell EMC OS10 node and returns the results read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. - This module does not support running commands in configuration mode. Please use M(dellos10_config) to configure Dell EMC OS10 devices. extends_documentation_fragment: dellos10 options: commands: description: - List of commands to send to the remote dellos10 device over the configured provider. The resulting output from the command is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or the number of retries has expired. type: list required: true wait_for: description: - List of conditions to evaluate against the output of the command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of I(retries), the task fails. See examples. type: list version_added: "2.2" match: description: - The I(match) argument is used in conjunction with the I(wait_for) argument to specify the match policy. Valid values are C(all) or C(any). If the value is set to C(all) then all conditionals in the wait_for must be satisfied. If the value is set to C(any) then only one of the values must be satisfied. type: str default: all choices: [ all, any ] version_added: "2.5" retries: description: - Specifies the number of retries a command should be tried before it is considered failed. The command is run on the target device every retry and evaluated against the I(wait_for) conditions. type: int default: 10 interval: description: - Configures the interval in seconds to wait between retries of the command. If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again. 
type: int default: 1 """ EXAMPLES = """ tasks: - name: run show version on remote devices dellos10_command: commands: show version - name: run show version and check to see if output contains OS10 dellos10_command: commands: show version wait_for: result[0] contains OS10 - name: run multiple commands on remote nodes dellos10_command: commands: - show version - show interface - name: run multiple commands and evaluate the output dellos10_command: commands: - show version - show interface wait_for: - result[0] contains OS10 - result[1] contains Ethernet """ RETURN = """ stdout: description: The set of responses from the commands returned: always apart from low level errors (such as action plugin) type: list sample: ['...', '...'] stdout_lines: description: The value of stdout split into a list returned: always apart from low level errors (such as action plugin) type: list sample: [['...', '...'], ['...'], ['...']] failed_conditions: description: The list of conditionals that have failed returned: failed type: list sample: ['...', '...'] warnings: description: The list of warnings (if any) generated by module based on arguments returned: always type: list sample: ['...', '...'] """ import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.dellos10.dellos10 import run_commands from ansible.module_utils.network.dellos10.dellos10 import dellos10_argument_spec, check_args from ansible.module_utils.network.common.utils import ComplexList from ansible.module_utils.network.common.parsing import Conditional from ansible.module_utils.six import string_types def to_lines(stdout): for item in stdout: if isinstance(item, string_types): item = str(item).split('\n') yield item def parse_commands(module, warnings): command = ComplexList(dict( command=dict(key=True), prompt=dict(), answer=dict() ), module) commands = command(module.params['commands']) for index, item in enumerate(commands): if module.check_mode and not item['command'].startswith('show'): warnings.append( 'only show commands are supported when using check mode, not ' 'executing `%s`' % item['command'] ) elif item['command'].startswith('conf'): module.fail_json( msg='dellos10_command does not support running config mode ' 'commands. 
Please use dellos10_config instead' ) return commands def main(): """main entry point for module execution """ argument_spec = dict( # { command: <str>, prompt: <str>, response: <str> } commands=dict(type='list', required=True), wait_for=dict(type='list'), match=dict(default='all', choices=['all', 'any']), retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) argument_spec.update(dellos10_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) result = {'changed': False} warnings = list() check_args(module, warnings) commands = parse_commands(module, warnings) result['warnings'] = warnings wait_for = module.params['wait_for'] or list() conditionals = [Conditional(c) for c in wait_for] retries = module.params['retries'] interval = module.params['interval'] match = module.params['match'] while retries > 0: responses = run_commands(module, commands) for item in list(conditionals): if item(responses): if match == 'any': conditionals = list() break conditionals.remove(item) if not conditionals: break time.sleep(interval) retries -= 1 if conditionals: failed_conditions = [item.raw for item in conditionals] msg = 'One or more conditional statements have not been satisfied' module.fail_json(msg=msg, failed_conditions=failed_conditions) result.update({ 'changed': False, 'stdout': responses, 'stdout_lines': list(to_lines(responses)) }) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
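main() above re-runs the command list until every wait_for conditional is satisfied or the retries are exhausted (shown here for the default match=all case); the same loop in standalone form, with a fake command in place of the Ansible plumbing:

# Standalone sketch of the retries/wait_for loop from main() above.
import time

def wait_for(run_command, conditionals, retries=10, interval=1):
    pending = list(conditionals)
    while retries > 0:
        output = run_command()
        for cond in list(pending):
            if cond(output):
                pending.remove(cond)
        if not pending:
            return output
        time.sleep(interval)
        retries -= 1
    raise RuntimeError('One or more conditional statements have not been satisfied')

calls = {'n': 0}

def fake_show_version():
    calls['n'] += 1
    return 'OS10 Enterprise 10.4' if calls['n'] >= 3 else 'still booting'

print(wait_for(fake_show_version, [lambda out: 'OS10' in out], interval=0))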
2ndQuadrant/ansible
lib/ansible/modules/cloud/vmware/vmware_host_package_facts.py
47
4254
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: vmware_host_package_facts short_description: Gathers facts about available packages on an ESXi host description: - This module can be used to gather facts about available packages and their status on an ESXi host. version_added: '2.5' author: - Abhijeet Kasurde (@Akasurde) notes: - Tested on vSphere 6.5 requirements: - python >= 2.6 - PyVmomi options: cluster_name: description: - Name of the cluster. - Package facts about each ESXi server will be returned for given cluster. - If C(esxi_hostname) is not given, this parameter is required. esxi_hostname: description: - ESXi hostname. - Package facts about this ESXi server will be returned. - If C(cluster_name) is not given, this parameter is required. extends_documentation_fragment: vmware.documentation ''' EXAMPLES = r''' - name: Gather facts about all ESXi Host in given Cluster vmware_host_package_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' cluster_name: cluster_name delegate_to: localhost register: cluster_host_packages - name: Gather facts about ESXi Host vmware_host_package_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_packages ''' RETURN = r''' hosts_package_facts: description: - dict with hostname as key and dict with package facts as value returned: hosts_package_facts type: dict sample: { "hosts_package_facts": { "localhost.localdomain": []}} ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi class VmwarePackageManager(PyVmomi): def __init__(self, module): super(VmwarePackageManager, self).__init__(module) cluster_name = self.params.get('cluster_name', None) esxi_host_name = self.params.get('esxi_hostname', None) self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) def gather_package_facts(self): hosts_facts = {} for host in self.hosts: host_package_facts = [] host_pkg_mgr = host.configManager.imageConfigManager if host_pkg_mgr: pkgs = host_pkg_mgr.FetchSoftwarePackages() for pkg in pkgs: host_package_facts.append(dict(name=pkg.name, version=pkg.version, vendor=pkg.vendor, summary=pkg.summary, description=pkg.description, acceptance_level=pkg.acceptanceLevel, maintenance_mode_required=pkg.maintenanceModeRequired, creation_date=pkg.creationDate, ) ) hosts_facts[host.name] = host_package_facts return hosts_facts def main(): argument_spec = vmware_argument_spec() argument_spec.update( cluster_name=dict(type='str', required=False), esxi_hostname=dict(type='str', required=False), ) module = AnsibleModule( argument_spec=argument_spec, required_one_of=[ ['cluster_name', 'esxi_hostname'], ] ) vmware_host_package_config = VmwarePackageManager(module) module.exit_json(changed=False, hosts_package_facts=vmware_host_package_config.gather_package_facts()) if __name__ == "__main__": main()
gpl-3.0
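The module above returns hosts_package_facts as a dict keyed by ESXi hostname, each value being a list of package dicts (name, version, vendor, and so on); a small sketch of consuming that structure, with invented sample data:

# Consuming the hosts_package_facts structure described in RETURN above;
# the host and package entries are made-up sample data.
facts = {
    'esxi01.example.com': [
        {'name': 'esx-base', 'version': '6.5.0-1.26', 'vendor': 'VMware'},
        {'name': 'tools-light', 'version': '6.5.0-0.23', 'vendor': 'VMware'},
    ],
}

for host, packages in facts.items():
    print(host, sorted(pkg['name'] for pkg in packages))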
anilmuthineni/tensorflow
tensorflow/python/saved_model/main_op.py
5
2262
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel main op. Builds a main op that defines the sequence of ops to be run as part of the SavedModel load/restore operations. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops as tf_data_flow_ops from tensorflow.python.ops import variables def main_op(): """Returns a main op to init variables and tables. Returns the main op including the group of ops that initializes all variables, initializes local variables and initialize all tables. Returns: The set of ops to be run as part of the main op upon the load operation. """ init = variables.global_variables_initializer() init_local = variables.local_variables_initializer() init_tables = tf_data_flow_ops.tables_initializer() return control_flow_ops.group(init, init_local, init_tables) def main_op_with_restore(restore_op_name): """Returns a main op to init variables, tables and restore the graph. Returns the main op including the group of ops that initializes all variables, initialize local variables, initialize all tables and the restore op name. Args: restore_op_name: Name of the op to use to restore the graph. Returns: The set of ops to be run as part of the main op upon the load operation. """ with ops.control_dependencies([main_op()]): main_op_with_restore = control_flow_ops.group(restore_op_name) return main_op_with_restore
apache-2.0
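main_op() above simply groups the global-variable, local-variable and table initializers; a minimal TF 1.x graph-mode sketch of building and running it follows (the variable exists only to give the initializer something to do, and running main_op outside a SavedModel load is purely illustrative).

# Minimal sketch (TF 1.x graph mode, an assumption) of running the grouped
# initializer returned by main_op(); the variable is illustrative only.
import tensorflow as tf
from tensorflow.python.saved_model import main_op

v = tf.Variable(3.0, name='v')
init = main_op.main_op()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(v))  # 3.0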
Tudorvr/metagoofil
hachoir_parser/program/exe_res.py
95
15292
""" Parser for resource of Microsoft Windows Portable Executable (PE). Documentation: - Wine project VS_FIXEDFILEINFO structure, file include/winver.h Author: Victor Stinner Creation date: 2007-01-19 """ from hachoir_core.field import (FieldSet, ParserError, Enum, Bit, Bits, SeekableFieldSet, UInt16, UInt32, TimestampUnix32, RawBytes, PaddingBytes, NullBytes, NullBits, CString, String) from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal from hachoir_core.tools import createDict, paddingSize, alignValue, makePrintable from hachoir_core.error import HACHOIR_ERRORS from hachoir_parser.common.win32 import BitmapInfoHeader MAX_DEPTH = 5 MAX_INDEX_PER_HEADER = 300 MAX_NAME_PER_HEADER = MAX_INDEX_PER_HEADER class Version(FieldSet): static_size = 32 def createFields(self): yield textHandler(UInt16(self, "minor", "Minor version number"), hexadecimal) yield textHandler(UInt16(self, "major", "Major version number"), hexadecimal) def createValue(self): return self["major"].value + float(self["minor"].value) / 10000 MAJOR_OS_NAME = { 1: "DOS", 2: "OS/2 16-bit", 3: "OS/2 32-bit", 4: "Windows NT", } MINOR_OS_BASE = 0 MINOR_OS_NAME = { 0: "Base", 1: "Windows 16-bit", 2: "Presentation Manager 16-bit", 3: "Presentation Manager 32-bit", 4: "Windows 32-bit", } FILETYPE_DRIVER = 3 FILETYPE_FONT = 4 FILETYPE_NAME = { 1: "Application", 2: "DLL", 3: "Driver", 4: "Font", 5: "VXD", 7: "Static library", } DRIVER_SUBTYPE_NAME = { 1: "Printer", 2: "Keyboard", 3: "Language", 4: "Display", 5: "Mouse", 6: "Network", 7: "System", 8: "Installable", 9: "Sound", 10: "Communications", } FONT_SUBTYPE_NAME = { 1: "Raster", 2: "Vector", 3: "TrueType", } class VersionInfoBinary(FieldSet): def createFields(self): yield textHandler(UInt32(self, "magic", "File information magic (0xFEEF04BD)"), hexadecimal) if self["magic"].value != 0xFEEF04BD: raise ParserError("EXE resource: invalid file info magic") yield Version(self, "struct_ver", "Structure version (1.0)") yield Version(self, "file_ver_ms", "File version MS") yield Version(self, "file_ver_ls", "File version LS") yield Version(self, "product_ver_ms", "Product version MS") yield Version(self, "product_ver_ls", "Product version LS") yield textHandler(UInt32(self, "file_flags_mask"), hexadecimal) yield Bit(self, "debug") yield Bit(self, "prerelease") yield Bit(self, "patched") yield Bit(self, "private_build") yield Bit(self, "info_inferred") yield Bit(self, "special_build") yield NullBits(self, "reserved", 26) yield Enum(textHandler(UInt16(self, "file_os_major"), hexadecimal), MAJOR_OS_NAME) yield Enum(textHandler(UInt16(self, "file_os_minor"), hexadecimal), MINOR_OS_NAME) yield Enum(textHandler(UInt32(self, "file_type"), hexadecimal), FILETYPE_NAME) field = textHandler(UInt32(self, "file_subfile"), hexadecimal) if field.value == FILETYPE_DRIVER: field = Enum(field, DRIVER_SUBTYPE_NAME) elif field.value == FILETYPE_FONT: field = Enum(field, FONT_SUBTYPE_NAME) yield field yield TimestampUnix32(self, "date_ms") yield TimestampUnix32(self, "date_ls") class VersionInfoNode(FieldSet): TYPE_STRING = 1 TYPE_NAME = { 0: "binary", 1: "string", } def __init__(self, parent, name, is_32bit=True): FieldSet.__init__(self, parent, name) self._size = alignValue(self["size"].value, 4) * 8 self.is_32bit = is_32bit def createFields(self): yield UInt16(self, "size", "Node size (in bytes)") yield UInt16(self, "data_size") yield Enum(UInt16(self, "type"), self.TYPE_NAME) yield CString(self, "name", charset="UTF-16-LE") size = paddingSize(self.current_size//8, 4) if size: 
yield NullBytes(self, "padding[]", size) size = self["data_size"].value if size: if self["type"].value == self.TYPE_STRING: if self.is_32bit: size *= 2 yield String(self, "value", size, charset="UTF-16-LE", truncate="\0") elif self["name"].value == "VS_VERSION_INFO": yield VersionInfoBinary(self, "value", size=size*8) if self["value/file_flags_mask"].value == 0: self.is_32bit = False else: yield RawBytes(self, "value", size) while 12 <= (self.size - self.current_size) // 8: yield VersionInfoNode(self, "node[]", self.is_32bit) size = (self.size - self.current_size) // 8 if size: yield NullBytes(self, "padding[]", size) def createDescription(self): text = "Version info node: %s" % self["name"].value if self["type"].value == self.TYPE_STRING and "value" in self: text += "=%s" % self["value"].value return text def parseVersionInfo(parent): yield VersionInfoNode(parent, "node[]") def parseIcon(parent): yield BitmapInfoHeader(parent, "bmp_header") size = (parent.size - parent.current_size) // 8 if size: yield RawBytes(parent, "raw", size) class WindowsString(FieldSet): def createFields(self): yield UInt16(self, "length", "Number of 16-bit characters") size = self["length"].value * 2 if size: yield String(self, "text", size, charset="UTF-16-LE") def createValue(self): if "text" in self: return self["text"].value else: return u"" def createDisplay(self): return makePrintable(self.value, "UTF-8", to_unicode=True, quote='"') def parseStringTable(parent): while not parent.eof: yield WindowsString(parent, "string[]") RESOURCE_TYPE = { 1: ("cursor[]", "Cursor", None), 2: ("bitmap[]", "Bitmap", None), 3: ("icon[]", "Icon", parseIcon), 4: ("menu[]", "Menu", None), 5: ("dialog[]", "Dialog", None), 6: ("string_table[]", "String table", parseStringTable), 7: ("font_dir[]", "Font directory", None), 8: ("font[]", "Font", None), 9: ("accelerators[]", "Accelerators", None), 10: ("raw_res[]", "Unformatted resource data", None), 11: ("message_table[]", "Message table", None), 12: ("group_cursor[]", "Group cursor", None), 14: ("group_icon[]", "Group icon", None), 16: ("version_info", "Version information", parseVersionInfo), } class Entry(FieldSet): static_size = 16*8 def __init__(self, parent, name, inode=None): FieldSet.__init__(self, parent, name) self.inode = inode def createFields(self): yield textHandler(UInt32(self, "rva"), hexadecimal) yield filesizeHandler(UInt32(self, "size")) yield UInt32(self, "codepage") yield NullBytes(self, "reserved", 4) def createDescription(self): return "Entry #%u: offset=%s size=%s" % ( self.inode["offset"].value, self["rva"].display, self["size"].display) class NameOffset(FieldSet): def createFields(self): yield UInt32(self, "name") yield Bits(self, "offset", 31) yield Bit(self, "is_name") class IndexOffset(FieldSet): TYPE_DESC = createDict(RESOURCE_TYPE, 1) def __init__(self, parent, name, res_type=None): FieldSet.__init__(self, parent, name) self.res_type = res_type def createFields(self): yield Enum(UInt32(self, "type"), self.TYPE_DESC) yield Bits(self, "offset", 31) yield Bit(self, "is_subdir") def createDescription(self): if self["is_subdir"].value: return "Sub-directory: %s at %s" % (self["type"].display, self["offset"].value) else: return "Index: ID %s at %s" % (self["type"].display, self["offset"].value) class ResourceContent(FieldSet): def __init__(self, parent, name, entry, size=None): FieldSet.__init__(self, parent, name, size=entry["size"].value*8) self.entry = entry res_type = self.getResType() if res_type in RESOURCE_TYPE: self._name, description, self._parser = 
RESOURCE_TYPE[res_type] else: self._parser = None def getResID(self): return self.entry.inode["offset"].value def getResType(self): return self.entry.inode.res_type def createFields(self): if self._parser: for field in self._parser(self): yield field else: yield RawBytes(self, "content", self.size//8) def createDescription(self): return "Resource #%u content: type=%s" % ( self.getResID(), self.getResType()) class Header(FieldSet): static_size = 16*8 def createFields(self): yield NullBytes(self, "options", 4) yield TimestampUnix32(self, "creation_date") yield UInt16(self, "maj_ver", "Major version") yield UInt16(self, "min_ver", "Minor version") yield UInt16(self, "nb_name", "Number of named entries") yield UInt16(self, "nb_index", "Number of indexed entries") def createDescription(self): text = "Resource header" info = [] if self["nb_name"].value: info.append("%u name" % self["nb_name"].value) if self["nb_index"].value: info.append("%u index" % self["nb_index"].value) if self["creation_date"].value: info.append(self["creation_date"].display) if info: return "%s: %s" % (text, ", ".join(info)) else: return text class Name(FieldSet): def createFields(self): yield UInt16(self, "length") size = min(self["length"].value, 255) if size: yield String(self, "name", size, charset="UTF-16LE") class Directory(FieldSet): def __init__(self, parent, name, res_type=None): FieldSet.__init__(self, parent, name) nb_entries = self["header/nb_name"].value + self["header/nb_index"].value self._size = Header.static_size + nb_entries * 64 self.res_type = res_type def createFields(self): yield Header(self, "header") if MAX_NAME_PER_HEADER < self["header/nb_name"].value: raise ParserError("EXE resource: invalid number of name (%s)" % self["header/nb_name"].value) if MAX_INDEX_PER_HEADER < self["header/nb_index"].value: raise ParserError("EXE resource: invalid number of index (%s)" % self["header/nb_index"].value) hdr = self["header"] for index in xrange(hdr["nb_name"].value): yield NameOffset(self, "name[]") for index in xrange(hdr["nb_index"].value): yield IndexOffset(self, "index[]", self.res_type) def createDescription(self): return self["header"].description class PE_Resource(SeekableFieldSet): def __init__(self, parent, name, section, size): SeekableFieldSet.__init__(self, parent, name, size=size) self.section = section def parseSub(self, directory, name, depth): indexes = [] for index in directory.array("index"): if index["is_subdir"].value: indexes.append(index) #indexes.sort(key=lambda index: index["offset"].value) for index in indexes: self.seekByte(index["offset"].value) if depth == 1: res_type = index["type"].value else: res_type = directory.res_type yield Directory(self, name, res_type) def createFields(self): # Parse directories depth = 0 subdir = Directory(self, "root") yield subdir subdirs = [subdir] alldirs = [subdir] while subdirs: depth += 1 if MAX_DEPTH < depth: self.error("EXE resource: depth too high (%s), stop parsing directories" % depth) break newsubdirs = [] for index, subdir in enumerate(subdirs): name = "directory[%u][%u][]" % (depth, index) try: for field in self.parseSub(subdir, name, depth): if field.__class__ == Directory: newsubdirs.append(field) yield field except HACHOIR_ERRORS, err: self.error("Unable to create directory %s: %s" % (name, err)) subdirs = newsubdirs alldirs.extend(subdirs) # Create resource list resources = [] for directory in alldirs: for index in directory.array("index"): if not index["is_subdir"].value: resources.append(index) # Parse entries entries = [] for 
resource in resources: offset = resource["offset"].value if offset is None: continue self.seekByte(offset) entry = Entry(self, "entry[]", inode=resource) yield entry entries.append(entry) entries.sort(key=lambda entry: entry["rva"].value) # Parse resource content for entry in entries: try: offset = self.section.rva2file(entry["rva"].value) padding = self.seekByte(offset, relative=False) if padding: yield padding yield ResourceContent(self, "content[]", entry) except HACHOIR_ERRORS, err: self.warning("Error when parsing entry %s: %s" % (entry.path, err)) size = (self.size - self.current_size) // 8 if size: yield PaddingBytes(self, "padding_end", size) class NE_VersionInfoNode(FieldSet): TYPE_STRING = 1 TYPE_NAME = { 0: "binary", 1: "string", } def __init__(self, parent, name): FieldSet.__init__(self, parent, name) self._size = alignValue(self["size"].value, 4) * 8 def createFields(self): yield UInt16(self, "size", "Node size (in bytes)") yield UInt16(self, "data_size") yield CString(self, "name", charset="ISO-8859-1") size = paddingSize(self.current_size//8, 4) if size: yield NullBytes(self, "padding[]", size) size = self["data_size"].value if size: if self["name"].value == "VS_VERSION_INFO": yield VersionInfoBinary(self, "value", size=size*8) else: yield String(self, "value", size, charset="ISO-8859-1") while 12 <= (self.size - self.current_size) // 8: yield NE_VersionInfoNode(self, "node[]") size = (self.size - self.current_size) // 8 if size: yield NullBytes(self, "padding[]", size) def createDescription(self): text = "Version info node: %s" % self["name"].value # if self["type"].value == self.TYPE_STRING and "value" in self: # text += "=%s" % self["value"].value return text
gpl-2.0
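The RESOURCE_TYPE table in the EXE resource parser above drives dispatch in two ways: IndexOffset.TYPE_DESC is built from it via createDict(RESOURCE_TYPE, 1) to label directory entries, and ResourceContent unpacks the full 3-tuple to pick a field name and an optional parser callback. Below is a minimal plain-Python sketch of that lookup; the ids and tuples are a small subset copied from the table above, and createDict's behavior (select the element at a given index from each value) is an assumption about the hachoir helper, not a documented guarantee.

# Standalone sketch of the resource-type dispatch used in the parser above.
RESOURCE_TYPE = {
    3: ("icon[]", "Icon", None),
    6: ("string_table[]", "String table", None),
    16: ("version_info", "Version information", None),
}

def createDict(data, index):
    # Assumed behavior of the hachoir helper: pick one element per value.
    return dict((key, values[index]) for key, values in data.items())

TYPE_DESC = createDict(RESOURCE_TYPE, 1)
print(TYPE_DESC[16])  # "Version information"

field_name, description, parser = RESOURCE_TYPE[16]
if parser is None:
    print("no dedicated parser; fall back to raw bytes")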
npuichigo/ttsflow
third_party/tensorflow/tensorflow/python/kernel_tests/variables_test.py
18
23601
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import operator import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent from tensorflow.python.util import compat class VariablesTestCase(test.TestCase): def testInitialization(self): with self.test_session(): var0 = variables.Variable(0.0) self.assertEqual("Variable:0", var0.name) self.assertEqual([], var0.get_shape()) self.assertEqual([], var0.get_shape()) self.assertEqual([], var0.shape) var1 = variables.Variable(1.1) self.assertEqual("Variable_1:0", var1.name) self.assertEqual([], var1.get_shape()) self.assertEqual([], var1.get_shape()) self.assertEqual([], var1.shape) with self.assertRaisesOpError("Attempting to use uninitialized value"): var0.eval() with self.assertRaisesOpError("Attempting to use uninitialized value"): var1.eval() variables.global_variables_initializer().run() self.assertAllClose(0.0, var0.eval()) self.assertAllClose(1.1, var1.eval()) def testInitializationOrder(self): with self.test_session(): rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd") self.assertEqual("rnd:0", rnd.name) self.assertEqual([3, 6], rnd.get_shape()) self.assertEqual([3, 6], rnd.get_shape()) self.assertEqual([3, 6], rnd.shape) dep = variables.Variable(rnd.initialized_value(), name="dep") self.assertEqual("dep:0", dep.name) self.assertEqual([3, 6], dep.get_shape()) self.assertEqual([3, 6], dep.get_shape()) self.assertEqual([3, 6], dep.shape) # Currently have to set the shape manually for Add. 
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0 added_val.set_shape(rnd.get_shape()) depdep = variables.Variable(added_val, name="depdep") self.assertEqual("depdep:0", depdep.name) self.assertEqual([3, 6], depdep.get_shape()) self.assertEqual([3, 6], depdep.get_shape()) self.assertEqual([3, 6], depdep.shape) variables.global_variables_initializer().run() self.assertAllClose(rnd.eval(), dep.eval()) self.assertAllClose(rnd.eval() + dep.eval() + 2.0, depdep.eval()) def testIterable(self): with self.assertRaisesRegexp(TypeError, "not iterable"): for _ in variables.Variable(0.0): pass with self.assertRaisesRegexp(TypeError, "not iterable"): for _ in variables.Variable([0.0, 1.0]): pass def testAssignments(self): with self.test_session(): var = variables.Variable(0.0) plus_one = var.assign_add(1.0) minus_one = var.assign_sub(2.0) four = var.assign(4.0) variables.global_variables_initializer().run() self.assertAllClose(0.0, var.eval()) self.assertAllClose(1.0, plus_one.eval()) self.assertAllClose(1.0, var.eval()) self.assertAllClose(-1.0, minus_one.eval()) self.assertAllClose(-1.0, var.eval()) self.assertAllClose(4.0, four.eval()) self.assertAllClose(4.0, var.eval()) def testResourceAssignments(self): with self.test_session(use_gpu=True): var = resource_variable_ops.ResourceVariable(0.0) plus_one = var.assign_add(1.0) minus_one = var.assign_sub(2.0) four = var.assign(4.0) variables.global_variables_initializer().run() self.assertAllClose(0.0, var.eval()) plus_one.eval() self.assertAllClose(1.0, var.eval()) minus_one.eval() self.assertAllClose(-1.0, var.eval()) four.eval() self.assertAllClose(4.0, var.eval()) def testZeroSizeStringAssign(self): with self.test_session() as sess: array = variables.Variable( initial_value=array_ops.zeros((0,), dtype=dtypes.string), name="foo", trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES]) sess.run(variables.local_variables_initializer()) old_value = array.value() copy_op = array.assign(old_value) self.assertEqual([], list(sess.run(copy_op))) def _countUpToTest(self, dtype): with self.test_session(): zero = constant_op.constant(0, dtype=dtype) var = variables.Variable(zero) count_up_to = var.count_up_to(3) variables.global_variables_initializer().run() self.assertEqual(0, var.eval()) self.assertEqual(0, count_up_to.eval()) self.assertEqual(1, var.eval()) self.assertEqual(1, count_up_to.eval()) self.assertEqual(2, var.eval()) self.assertEqual(2, count_up_to.eval()) self.assertEqual(3, var.eval()) with self.assertRaisesOpError("Reached limit of 3"): count_up_to.eval() self.assertEqual(3, var.eval()) with self.assertRaisesOpError("Reached limit of 3"): count_up_to.eval() self.assertEqual(3, var.eval()) def testCountUpToInt32(self): self._countUpToTest(dtypes.int32) def testCountUpToInt64(self): self._countUpToTest(dtypes.int64) def testControlDepsNone(self): with self.test_session(): c = constant_op.constant(1.0) with ops.control_dependencies([c]): # d get the control dep. d = constant_op.constant(2.0) # variables do not. var_x = variables.Variable(2.0) self.assertEqual([c.op], d.op.control_inputs) self.assertEqual([], var_x.initializer.control_inputs) self.assertEqual([], var_x.value().op.control_inputs) self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access def testControlFlow(self): with self.test_session() as sess: v0 = variables.Variable(0, name="v0") var_dict = {} # Call get_variable in each of the cond clauses. 
def var_in_then_clause(): v1 = variables.Variable(1, name="v1") var_dict["v1"] = v1 return v1 + v0 def var_in_else_clause(): v2 = variables.Variable(2, name="v2") var_dict["v2"] = v2 return v2 + v0 add = control_flow_ops.cond( math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause) v1 = var_dict["v1"] v2 = var_dict["v2"] # We should be able to initialize and run v1 and v2 without initializing # v0, even if the variable was created with a control dep on v0. sess.run(v1.initializer) self.assertEqual([1], sess.run(v1)) sess.run(v2.initializer) self.assertEqual([2], sess.run(v2)) # v0 should still be uninitialized. with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"): sess.run(v0) # We should not be able to run 'add' yet. with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"): sess.run(add) # If we initialize v0 we should be able to run 'add'. sess.run(v0.initializer) sess.run(add) def testControlFlowInitialization(self): """Expects an error if an initializer is in a control-flow scope.""" def cond(i, _): return i < 10 def body(i, _): zero = array_ops.zeros([], dtype=dtypes.int32) v = variables.Variable(initial_value=zero) return (i + 1, v.read_value()) with self.assertRaisesRegexp(ValueError, "inside a control-flow"): control_flow_ops.while_loop(cond, body, [0, 0]) def testUseVariableAsTensor(self): with self.test_session(): var_x = variables.Variable(2.0) var_y = variables.Variable(3.0) variables.global_variables_initializer().run() self.assertAllClose(2.0, var_x.eval()) self.assertAllClose(3.0, var_y.eval()) self.assertAllClose(5.0, math_ops.add(var_x, var_y).eval()) def testZeroSizeVarSameAsConst(self): with self.test_session(): zero_size_var = variables.Variable(array_ops.zeros([0, 2])) zero_size_const = array_ops.ones([2, 0]) variable_mul = math_ops.matmul(zero_size_const, zero_size_var) const_mul = math_ops.matmul( zero_size_const, zero_size_const, transpose_b=True) variables.global_variables_initializer().run() variable_output = variable_mul.eval() self.assertAllClose(const_mul.eval(), variable_output) self.assertAllClose([[0., 0.], [0., 0.]], variable_output) def testCachingDevice(self): with self.test_session(): var = variables.Variable(2.0) self.assertEqual(var.device, var.value().device) self.assertEqual(var.device, var.initialized_value().device) var_cached = variables.Variable(2.0, caching_device="/job:foo") self.assertFalse(var_cached.device.startswith("/job:foo")) self.assertTrue(var_cached.value().device.startswith("/job:foo")) def testCollections(self): with self.test_session(): var_x = variables.Variable(2.0) var_y = variables.Variable(2.0, trainable=False) var_z = variables.Variable(2.0, trainable=True) var_t = variables.Variable( 2.0, trainable=True, collections=[ ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES ]) self.assertEqual([var_x, var_y, var_z, var_t], variables.global_variables()) self.assertEqual([var_x, var_z, var_t], variables.trainable_variables()) def testOperators(self): with self.test_session(): var_f = variables.Variable([2.0]) add = var_f + 0.0 radd = 1.0 + var_f sub = var_f - 1.0 rsub = 1.0 - var_f mul = var_f * 10.0 rmul = 10.0 * var_f div = var_f / 10.0 rdiv = 10.0 / var_f lt = var_f < 3.0 rlt = 3.0 < var_f le = var_f <= 2.0 rle = 2.0 <= var_f gt = var_f > 3.0 rgt = 3.0 > var_f ge = var_f >= 2.0 rge = 2.0 >= var_f neg = -var_f abs_v = abs(var_f) var_i = variables.Variable([20]) mod = var_i % 7 rmod = 103 % var_i var_b = variables.Variable([True, False]) and_v = operator.and_(var_b, [True, True]) or_v = 
operator.or_(var_b, [False, True]) xor_v = operator.xor(var_b, [False, False]) invert_v = ~var_b rnd = np.random.rand(4, 4).astype("f") var_t = variables.Variable(rnd) slice_v = var_t[2, 0:0] var_m = variables.Variable([[2.0, 3.0]]) matmul = var_m.__matmul__([[10.0], [20.0]]) rmatmul = var_m.__rmatmul__([[10.0], [20.0]]) variables.global_variables_initializer().run() self.assertAllClose([2.0], add.eval()) self.assertAllClose([3.0], radd.eval()) self.assertAllClose([1.0], sub.eval()) self.assertAllClose([-1.0], rsub.eval()) self.assertAllClose([20.0], mul.eval()) self.assertAllClose([20.0], rmul.eval()) self.assertAllClose([0.2], div.eval()) self.assertAllClose([5.0], rdiv.eval()) self.assertAllClose([-2.0], neg.eval()) self.assertAllClose([2.0], abs_v.eval()) self.assertAllClose([True], lt.eval()) self.assertAllClose([False], rlt.eval()) self.assertAllClose([True], le.eval()) self.assertAllClose([True], rle.eval()) self.assertAllClose([False], gt.eval()) self.assertAllClose([True], rgt.eval()) self.assertAllClose([True], ge.eval()) self.assertAllClose([True], rge.eval()) self.assertAllClose([6], mod.eval()) self.assertAllClose([3], rmod.eval()) self.assertAllClose([True, False], and_v.eval()) self.assertAllClose([True, True], or_v.eval()) self.assertAllClose([True, False], xor_v.eval()) self.assertAllClose([False, True], invert_v.eval()) self.assertAllClose(rnd[2, 0:0], slice_v.eval()) self.assertAllClose([[80.0]], matmul.eval()) self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], rmatmul.eval()) def testSession(self): with self.test_session() as sess: var = variables.Variable([1, 12]) variables.global_variables_initializer().run() self.assertAllClose([1, 12], sess.run(var)) def testDevicePlacement(self): with self.test_session() as sess: with ops.device("/cpu:0"): var = variables.Variable([1, 12]) init_value = var.initialized_value() init_op = variables.global_variables_initializer() self.assertEqual(var.op.device, init_value.device) self.assertEqual(var.op.device, init_op.device) sess.run(init_op) def testColocation(self): with ops.device("/job:ps"): var = variables.Variable(0, name="v") with ops.device("/job:worker/task:7"): assign_op = var.assign(1) self.assertDeviceEqual("/job:ps", assign_op.device) self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups()) def testInitializerFunction(self): value = [[-42], [133.7]] shape = [2, 1] with self.test_session(): initializer = lambda: constant_op.constant(value) v1 = variables.Variable(initializer, dtype=dtypes.float32) self.assertEqual(shape, v1.get_shape()) self.assertEqual(shape, v1.shape) self.assertAllClose(value, v1.initial_value.eval()) with self.assertRaises(errors_impl.FailedPreconditionError): v1.eval() v2 = variables.Variable( math_ops.negative(v1.initialized_value()), dtype=dtypes.float32) self.assertEqual(v1.get_shape(), v2.get_shape()) self.assertEqual(v1.shape, v2.shape) self.assertAllClose(np.negative(value), v2.initial_value.eval()) with self.assertRaises(errors_impl.FailedPreconditionError): v2.eval() variables.global_variables_initializer().run() self.assertAllClose(np.negative(value), v2.eval()) def testNoRefDataRace(self): with self.test_session(): a = variables.Variable([1, 2, 3], dtype=dtypes.float32) b = variables.Variable(a.initialized_value() + 2) c = variables.Variable(b.initialized_value() + 2) variables.global_variables_initializer().run() self.assertAllEqual(a.eval(), [1, 2, 3]) self.assertAllEqual(b.eval(), [3, 4, 5]) self.assertAllEqual(c.eval(), [5, 6, 7]) def testInitializerFunctionDevicePlacement(self): 
with self.test_session(): initializer = lambda: constant_op.constant(42.0) with ops.device("/cpu:100"): v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1") expected_device = "/device:CPU:100" expected_group_v1 = [b"loc:@v1"] self.assertEqual(expected_device, v1.op.device) self.assertEqual(expected_group_v1, v1.op.colocation_groups()) for i in v1.initializer.inputs: self.assertEqual(expected_group_v1, i.op.colocation_groups()) v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2") expected_group_v2 = [b"loc:@v2"] self.assertEqual(expected_group_v2, v2.op.colocation_groups()) for i in v2.initializer.inputs: self.assertEqual(expected_group_v2, i.op.colocation_groups()) def testLoad(self): with self.test_session(): var = variables.Variable(np.zeros((5, 5), np.float32)) variables.global_variables_initializer().run() var.load(np.ones((5, 5), np.float32)) self.assertAllClose(np.ones((5, 5), np.float32), var.eval()) def testRepr(self): var = variables.Variable(np.zeros((5, 5), np.float32), name='noop') self.assertEqual( "<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>", repr(var)) class IsInitializedTest(test.TestCase): def testNoVars(self): with ops.Graph().as_default(), self.test_session() as sess: uninited = variables.report_uninitialized_variables() self.assertEqual(0, sess.run(uninited).size) def testAssertVariablesInitialized(self): with ops.Graph().as_default(), self.test_session() as sess: v = variables.Variable([1, 2], name="v") w = variables.Variable([3, 4], name="w") _ = v, w uninited = variables.report_uninitialized_variables() self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited)) variables.global_variables_initializer().run() self.assertEqual(0, sess.run(uninited).size) def testVariableList(self): with ops.Graph().as_default(), self.test_session() as sess: v = variables.Variable([1, 2], name="v") w = variables.Variable([3, 4], name="w") uninited = variables.report_uninitialized_variables() self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited)) sess.run(w.initializer) self.assertAllEqual(np.array([b"v"]), sess.run(uninited)) v.initializer.run() self.assertEqual(0, sess.run(uninited).size) def testZeroSizeVarInitialized(self): with ops.Graph().as_default(), self.test_session() as sess: v = variables.Variable(array_ops.zeros([0, 2]), name="v") uninited = variables.report_uninitialized_variables() v.initializer.run() # not strictly necessary self.assertEqual(0, sess.run(uninited).size) def testTrainingWithZeroSizeVar(self): with ops.Graph().as_default(), self.test_session() as sess: a = variables.Variable(array_ops.zeros([0, 2])) b = variables.Variable(array_ops.ones([2, 2])) objective = math_ops.reduce_sum(b + math_ops.matmul( a, a, transpose_a=True)) variables.global_variables_initializer().run() do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize( objective) sess.run([do_opt]) self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], b.eval()) class ObsoleteIsInitializedTest(test.TestCase): def testNoVars(self): with ops.Graph().as_default(): self.assertEqual(None, variables.assert_variables_initialized()) def testVariables(self): with ops.Graph().as_default(), self.test_session() as sess: v = variables.Variable([1, 2]) w = variables.Variable([3, 4]) _ = v, w inited = variables.assert_variables_initialized() with self.assertRaisesOpError("Attempting to use uninitialized value"): sess.run(inited) variables.global_variables_initializer().run() sess.run(inited) def testVariableList(self): with ops.Graph().as_default(), 
self.test_session() as sess: v = variables.Variable([1, 2]) w = variables.Variable([3, 4]) inited = variables.assert_variables_initialized([v]) with self.assertRaisesOpError("Attempting to use uninitialized value"): inited.op.run() sess.run(w.initializer) with self.assertRaisesOpError("Attempting to use uninitialized value"): inited.op.run() v.initializer.run() inited.op.run() class PartitionedVariableTest(test.TestCase): def testPartitionedVariable(self): with ops.Graph().as_default(): v0 = variables.Variable([0]) v1 = variables.Variable([1]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) v1._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1])) partitions = [2] # Pass variable_list as [v1, v0] to ensure they are properly # re-sorted to [v0, v1] based on their slice info offsets. partitioned_variable = variables.PartitionedVariable( name="two_vars", shape=[2], dtype=v0.dtype, variable_list=[v1, v0], partitions=partitions) concatenated = ops.convert_to_tensor(partitioned_variable) num_partitions = len(partitioned_variable) iterated_partitions = list(partitioned_variable) self.assertEqual(2, num_partitions) self.assertEqual([v0, v1], iterated_partitions) self.assertEqual([2], concatenated.get_shape()) self.assertEqual([2], concatenated.shape) def testPartitionedVariableFailures(self): with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, "empty"): variables.PartitionedVariable( name="fail", shape=2, dtype=dtypes.int32, variable_list=[], partitions=[]) with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"): v0 = variables.Variable([0]) partitions = [1] variables.PartitionedVariable( name="two_vars", shape=[1], dtype=v0.dtype, variable_list=[v0], partitions=partitions) with self.assertRaisesRegexp(ValueError, "full shapes must match"): v0 = variables.Variable([0]) v1 = variables.Variable([1]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) v1._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1])) partitions = [2] variables.PartitionedVariable( name="two_vars", shape=[3], dtype=v0.dtype, variable_list=[v1, v0], partitions=partitions) with self.assertRaisesRegexp(ValueError, "must be positive"): v0 = variables.Variable([0]) v0._set_save_slice_info( variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1])) partitions = [0] variables.PartitionedVariable( name="two_vars", shape=[2], dtype=v0.dtype, variable_list=[v0], partitions=partitions) class VariableContainerTest(test.TestCase): def testContainer(self): with ops.Graph().as_default(): v0 = variables.Variable([0]) with ops.container("l1"): v1 = variables.Variable([1]) with ops.container("l2"): v2 = variables.Variable([2]) special_v = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="VariableInL3", container="l3", shared_name="") v3 = variables.Variable([3]) v4 = variables.Variable([4]) self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container")) self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container")) self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container")) self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container")) self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container")) self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container")) if __name__ == "__main__": test.main()
apache-2.0
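The initialization-order test above relies on Variable.initialized_value() so that one variable's initializer can consume another variable's value regardless of the order in which initializers run. A minimal TF1-style graph-mode sketch of that pattern, independent of the test harness above:

import tensorflow as tf  # assumes a TensorFlow 1.x installation, matching the tests above

# 'b' is initialized from the *initialized* value of 'a', so running the global
# initializer works no matter which initializer the runtime happens to execute first.
a = tf.Variable(tf.random_uniform([3, 6]), name="a")
b = tf.Variable(a.initialized_value() * 2.0, name="b")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b).shape)  # (3, 6)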
sivatha/video-player-sample
source/core/js/libs/closure-library/closure/bin/build/depswriter.py
17
6206
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""Generates a Closure deps.js file given a list of JavaScript sources.

Paths can be specified as arguments or (more commonly) by specifying trees
with the flags (call with --help for descriptions).

Usage: depswriter.py [path/to/js1.js [path/to/js2.js] ...]
"""

import logging
import optparse
import os
import posixpath
import shlex
import sys

import source
import treescan


def MakeDepsFile(source_map):
  """Make a generated deps file.

  Args:
    source_map: A dict map of the source path to source.Source object.

  Returns:
    str, A generated deps file source.
  """
  # Write in path alphabetical order
  paths = source_map.keys()
  paths.sort()
  lines = []

  for path in paths:
    js_source = source_map[path]
    # We don't need to add entries that don't provide anything.
    if js_source.provides:
      lines.append(_GetDepsLine(path, js_source))

  return ''.join(lines)


def _GetDepsLine(path, js_source):
  """Get a deps.js file string for a source."""
  provides = list(js_source.provides)
  provides.sort()
  requires = list(js_source.requires)
  requires.sort()
  return 'goog.addDependency(\'%s\', %s, %s);\n' % (path, provides, requires)


def _GetOptionsParser():
  """Get the options parser."""
  parser = optparse.OptionParser(__doc__)
  parser.add_option('--output_file',
                    dest='output_file',
                    action='store',
                    help=('If specified, write output to this path instead of '
                          'writing to standard output.'))
  parser.add_option('--root',
                    dest='roots',
                    default=[],
                    action='append',
                    help='A root directory to scan for JS source files. '
                         'Paths of JS files in generated deps file will be '
                         'relative to this path. This flag may be specified '
                         'multiple times.')
  parser.add_option('--root_with_prefix',
                    dest='roots_with_prefix',
                    default=[],
                    action='append',
                    help='A root directory to scan for JS source files, plus '
                         'a prefix (if either contains a space, surround with '
                         'quotes). Paths in generated deps file will be relative '
                         'to the root, but preceded by the prefix. This flag '
                         'may be specified multiple times.')
  parser.add_option('--path_with_depspath',
                    dest='paths_with_depspath',
                    default=[],
                    action='append',
                    help='A path to a source file and an alternate path to '
                         'the file in the generated deps file (if either contains '
                         'a space, surround with quotes). This flag may be '
                         'specified multiple times.')
  return parser


def _NormalizePathSeparators(path):
  """Replaces OS-specific path separators with POSIX-style slashes.

  Args:
    path: str, A file path.

  Returns:
    str, The path with any OS-specific path separators (such as backslash on
      Windows) replaced with URL-compatible forward slashes. A no-op on
      systems that use POSIX paths.
  """
  return path.replace(os.sep, posixpath.sep)


def _GetRelativePathToSourceDict(root, prefix=''):
  """Scans a top root directory for .js sources.

  Args:
    root: str, Root directory.
    prefix: str, Prefix for returned paths.

  Returns:
    dict, A map of relative paths (with prefix, if given), to source.Source
      objects.
  """
  # Remember and restore the cwd when we're done. We work from the root so
  # that paths are relative from the root.
  start_wd = os.getcwd()
  os.chdir(root)

  path_to_source = {}
  for path in treescan.ScanTreeForJsFiles('.'):
    prefixed_path = _NormalizePathSeparators(os.path.join(prefix, path))
    path_to_source[prefixed_path] = source.Source(source.GetFileContents(path))

  os.chdir(start_wd)

  return path_to_source


def _GetPair(s):
  """Return a string as a shell-parsed tuple. Two values expected."""
  try:
    # shlex uses '\' as an escape character, so they must be escaped.
    s = s.replace('\\', '\\\\')
    first, second = shlex.split(s)
    return (first, second)
  except:
    raise Exception('Unable to parse input line as a pair: %s' % s)


def main():
  """CLI frontend to MakeDepsFile."""
  logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                      level=logging.INFO)
  options, args = _GetOptionsParser().parse_args()

  path_to_source = {}

  # Roots without prefixes
  for root in options.roots:
    path_to_source.update(_GetRelativePathToSourceDict(root))

  # Roots with prefixes
  for root_and_prefix in options.roots_with_prefix:
    root, prefix = _GetPair(root_and_prefix)
    path_to_source.update(_GetRelativePathToSourceDict(root, prefix=prefix))

  # Source paths
  for path in args:
    path_to_source[path] = source.Source(source.GetFileContents(path))

  # Source paths with alternate deps paths
  for path_with_depspath in options.paths_with_depspath:
    srcpath, depspath = _GetPair(path_with_depspath)
    path_to_source[depspath] = source.Source(source.GetFileContents(srcpath))

  # Make our output pipe.
  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout

  out.write('// This file was autogenerated by %s.\n' % sys.argv[0])
  out.write('// Please do not edit.\n')

  out.write(MakeDepsFile(path_to_source))


if __name__ == '__main__':
  main()

apache-2.0
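_GetDepsLine above emits one goog.addDependency(...) call per source that provides something. The following standalone sketch reproduces the emitted line format; the path and namespace names are made-up examples, not taken from any real project.

# Standalone sketch of the deps line format produced by _GetDepsLine above.
def make_deps_line(path, provides, requires):
    return "goog.addDependency('%s', %s, %s);\n" % (path, sorted(provides), sorted(requires))

print(make_deps_line('app/js/foo.js', ['app.Foo'], ['goog.dom', 'goog.events']))
# goog.addDependency('app/js/foo.js', ['app.Foo'], ['goog.dom', 'goog.events']);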
WimpyAnalytics/django-andablog
demo/common/migrations/0002_auto_20150507_1708.py
1
1391
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django import VERSION as DJANGO_VERSION


def get_operations():
    """
    This will break things if you upgrade Django to 1.8 having already applied this
    migration in 1.7. Since this is for a demo site it doesn't really matter (simply
    blow away the DB if you want to go to 1.8).

    Our demo site is unusual in that we want to run its tests (for integration testing)
    in multiple Django versions. Typical sites don't have to worry about that sort of thing.
    """
    compatible = (1, 8) <= DJANGO_VERSION < (1, 10)
    if not compatible:
        return []

    return [
        migrations.AlterField(
            model_name='user',
            name='groups',
            field=models.ManyToManyField(
                related_query_name='user', related_name='user_set', to='auth.Group',
                blank=True,
                help_text='The groups this user belongs to. A user will get all '
                          'permissions granted to each of their groups.',
                verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_login',
            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
        ),
    ]


class Migration(migrations.Migration):

    dependencies = [
        ('common', '0001_initial'),
    ]

    operations = get_operations()
bsd-2-clause
dfdx2/django
tests/httpwrappers/tests.py
14
30531
import copy import json import os import pickle import unittest import uuid from django.core.exceptions import DisallowedRedirect, SuspiciousOperation from django.core.serializers.json import DjangoJSONEncoder from django.core.signals import request_finished from django.db import close_old_connections from django.http import ( BadHeaderError, HttpResponse, HttpResponseNotAllowed, HttpResponseNotModified, HttpResponsePermanentRedirect, HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie, StreamingHttpResponse, parse_cookie, ) from django.test import SimpleTestCase from django.utils.functional import lazystr class QueryDictTests(SimpleTestCase): def test_create_with_no_args(self): self.assertEqual(QueryDict(), QueryDict('')) def test_missing_key(self): q = QueryDict() with self.assertRaises(KeyError): q.__getitem__('foo') def test_immutability(self): q = QueryDict() with self.assertRaises(AttributeError): q.__setitem__('something', 'bar') with self.assertRaises(AttributeError): q.setlist('foo', ['bar']) with self.assertRaises(AttributeError): q.appendlist('foo', ['bar']) with self.assertRaises(AttributeError): q.update({'foo': 'bar'}) with self.assertRaises(AttributeError): q.pop('foo') with self.assertRaises(AttributeError): q.popitem() with self.assertRaises(AttributeError): q.clear() def test_immutable_get_with_default(self): q = QueryDict() self.assertEqual(q.get('foo', 'default'), 'default') def test_immutable_basic_operations(self): q = QueryDict() self.assertEqual(q.getlist('foo'), []) self.assertNotIn('foo', q) self.assertEqual(list(q.items()), []) self.assertEqual(list(q.lists()), []) self.assertEqual(list(q.keys()), []) self.assertEqual(list(q.values()), []) self.assertEqual(len(q), 0) self.assertEqual(q.urlencode(), '') def test_single_key_value(self): """Test QueryDict with one key/value pair""" q = QueryDict('foo=bar') self.assertEqual(q['foo'], 'bar') with self.assertRaises(KeyError): q.__getitem__('bar') with self.assertRaises(AttributeError): q.__setitem__('something', 'bar') self.assertEqual(q.get('foo', 'default'), 'bar') self.assertEqual(q.get('bar', 'default'), 'default') self.assertEqual(q.getlist('foo'), ['bar']) self.assertEqual(q.getlist('bar'), []) with self.assertRaises(AttributeError): q.setlist('foo', ['bar']) with self.assertRaises(AttributeError): q.appendlist('foo', ['bar']) self.assertIn('foo', q) self.assertNotIn('bar', q) self.assertEqual(list(q.items()), [('foo', 'bar')]) self.assertEqual(list(q.lists()), [('foo', ['bar'])]) self.assertEqual(list(q.keys()), ['foo']) self.assertEqual(list(q.values()), ['bar']) self.assertEqual(len(q), 1) with self.assertRaises(AttributeError): q.update({'foo': 'bar'}) with self.assertRaises(AttributeError): q.pop('foo') with self.assertRaises(AttributeError): q.popitem() with self.assertRaises(AttributeError): q.clear() with self.assertRaises(AttributeError): q.setdefault('foo', 'bar') self.assertEqual(q.urlencode(), 'foo=bar') def test_urlencode(self): q = QueryDict(mutable=True) q['next'] = '/a&b/' self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F') self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/') q = QueryDict(mutable=True) q['next'] = '/t\xebst&key/' self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F') self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/') def test_mutable_copy(self): """A copy of a QueryDict is mutable.""" q = QueryDict().copy() with self.assertRaises(KeyError): q.__getitem__("foo") q['name'] = 'john' self.assertEqual(q['name'], 'john') def test_mutable_delete(self): q 
= QueryDict(mutable=True) q['name'] = 'john' del q['name'] self.assertNotIn('name', q) def test_basic_mutable_operations(self): q = QueryDict(mutable=True) q['name'] = 'john' self.assertEqual(q.get('foo', 'default'), 'default') self.assertEqual(q.get('name', 'default'), 'john') self.assertEqual(q.getlist('name'), ['john']) self.assertEqual(q.getlist('foo'), []) q.setlist('foo', ['bar', 'baz']) self.assertEqual(q.get('foo', 'default'), 'baz') self.assertEqual(q.getlist('foo'), ['bar', 'baz']) q.appendlist('foo', 'another') self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another']) self.assertEqual(q['foo'], 'another') self.assertIn('foo', q) self.assertCountEqual(q.items(), [('foo', 'another'), ('name', 'john')]) self.assertCountEqual(q.lists(), [('foo', ['bar', 'baz', 'another']), ('name', ['john'])]) self.assertCountEqual(q.keys(), ['foo', 'name']) self.assertCountEqual(q.values(), ['another', 'john']) q.update({'foo': 'hello'}) self.assertEqual(q['foo'], 'hello') self.assertEqual(q.get('foo', 'not available'), 'hello') self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello']) self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello']) self.assertEqual(q.pop('foo', 'not there'), 'not there') self.assertEqual(q.get('foo', 'not there'), 'not there') self.assertEqual(q.setdefault('foo', 'bar'), 'bar') self.assertEqual(q['foo'], 'bar') self.assertEqual(q.getlist('foo'), ['bar']) self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar']) q.clear() self.assertEqual(len(q), 0) def test_multiple_keys(self): """Test QueryDict with two key/value pairs with same keys.""" q = QueryDict('vote=yes&vote=no') self.assertEqual(q['vote'], 'no') with self.assertRaises(AttributeError): q.__setitem__('something', 'bar') self.assertEqual(q.get('vote', 'default'), 'no') self.assertEqual(q.get('foo', 'default'), 'default') self.assertEqual(q.getlist('vote'), ['yes', 'no']) self.assertEqual(q.getlist('foo'), []) with self.assertRaises(AttributeError): q.setlist('foo', ['bar', 'baz']) with self.assertRaises(AttributeError): q.setlist('foo', ['bar', 'baz']) with self.assertRaises(AttributeError): q.appendlist('foo', ['bar']) self.assertIn('vote', q) self.assertNotIn('foo', q) self.assertEqual(list(q.items()), [('vote', 'no')]) self.assertEqual(list(q.lists()), [('vote', ['yes', 'no'])]) self.assertEqual(list(q.keys()), ['vote']) self.assertEqual(list(q.values()), ['no']) self.assertEqual(len(q), 1) with self.assertRaises(AttributeError): q.update({'foo': 'bar'}) with self.assertRaises(AttributeError): q.pop('foo') with self.assertRaises(AttributeError): q.popitem() with self.assertRaises(AttributeError): q.clear() with self.assertRaises(AttributeError): q.setdefault('foo', 'bar') with self.assertRaises(AttributeError): q.__delitem__('vote') def test_pickle(self): q = QueryDict() q1 = pickle.loads(pickle.dumps(q, 2)) self.assertEqual(q, q1) q = QueryDict('a=b&c=d') q1 = pickle.loads(pickle.dumps(q, 2)) self.assertEqual(q, q1) q = QueryDict('a=b&c=d&a=1') q1 = pickle.loads(pickle.dumps(q, 2)) self.assertEqual(q, q1) def test_update_from_querydict(self): """Regression test for #8278: QueryDict.update(QueryDict)""" x = QueryDict("a=1&a=2", mutable=True) y = QueryDict("a=3&a=4") x.update(y) self.assertEqual(x.getlist('a'), ['1', '2', '3', '4']) def test_non_default_encoding(self): """#13572 - QueryDict with a non-default encoding""" q = QueryDict('cur=%A4', encoding='iso-8859-15') self.assertEqual(q.encoding, 'iso-8859-15') self.assertEqual(list(q.items()), [('cur', '€')]) 
self.assertEqual(q.urlencode(), 'cur=%A4') q = q.copy() self.assertEqual(q.encoding, 'iso-8859-15') self.assertEqual(list(q.items()), [('cur', '€')]) self.assertEqual(q.urlencode(), 'cur=%A4') self.assertEqual(copy.copy(q).encoding, 'iso-8859-15') self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15') def test_querydict_fromkeys(self): self.assertEqual(QueryDict.fromkeys(['key1', 'key2', 'key3']), QueryDict('key1&key2&key3')) def test_fromkeys_with_nonempty_value(self): self.assertEqual( QueryDict.fromkeys(['key1', 'key2', 'key3'], value='val'), QueryDict('key1=val&key2=val&key3=val') ) def test_fromkeys_is_immutable_by_default(self): # Match behavior of __init__() which is also immutable by default. q = QueryDict.fromkeys(['key1', 'key2', 'key3']) with self.assertRaisesMessage(AttributeError, 'This QueryDict instance is immutable'): q['key4'] = 'nope' def test_fromkeys_mutable_override(self): q = QueryDict.fromkeys(['key1', 'key2', 'key3'], mutable=True) q['key4'] = 'yep' self.assertEqual(q, QueryDict('key1&key2&key3&key4=yep')) def test_duplicates_in_fromkeys_iterable(self): self.assertEqual(QueryDict.fromkeys('xyzzy'), QueryDict('x&y&z&z&y')) def test_fromkeys_with_nondefault_encoding(self): key_utf16 = b'\xff\xfe\x8e\x02\xdd\x01\x9e\x02' value_utf16 = b'\xff\xfe\xdd\x01n\x00l\x00P\x02\x8c\x02' q = QueryDict.fromkeys([key_utf16], value=value_utf16, encoding='utf-16') expected = QueryDict('', mutable=True) expected['ʎǝʞ'] = 'ǝnlɐʌ' self.assertEqual(q, expected) def test_fromkeys_empty_iterable(self): self.assertEqual(QueryDict.fromkeys([]), QueryDict('')) def test_fromkeys_noniterable(self): with self.assertRaises(TypeError): QueryDict.fromkeys(0) class HttpResponseTests(unittest.TestCase): def test_headers_type(self): r = HttpResponse() # ASCII strings or bytes values are converted to strings. r['key'] = 'test' self.assertEqual(r['key'], 'test') r['key'] = 'test'.encode('ascii') self.assertEqual(r['key'], 'test') self.assertIn(b'test', r.serialize_headers()) # Non-ASCII values are serialized to Latin-1. r['key'] = 'café' self.assertIn('café'.encode('latin-1'), r.serialize_headers()) # Other unicode values are MIME-encoded (there's no way to pass them as bytes). 
r['key'] = '†' self.assertEqual(r['key'], '=?utf-8?b?4oCg?=') self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers()) # The response also converts string or bytes keys to strings, but requires # them to contain ASCII r = HttpResponse() del r['Content-Type'] r['foo'] = 'bar' headers = list(r.items()) self.assertEqual(len(headers), 1) self.assertEqual(headers[0], ('foo', 'bar')) r = HttpResponse() del r['Content-Type'] r[b'foo'] = 'bar' headers = list(r.items()) self.assertEqual(len(headers), 1) self.assertEqual(headers[0], ('foo', 'bar')) self.assertIsInstance(headers[0][0], str) r = HttpResponse() with self.assertRaises(UnicodeError): r.__setitem__('føø', 'bar') with self.assertRaises(UnicodeError): r.__setitem__('føø'.encode(), 'bar') def test_long_line(self): # Bug #20889: long lines trigger newlines to be added to headers # (which is not allowed due to bug #10188) h = HttpResponse() f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1') f = f.decode('utf-8') h['Content-Disposition'] = 'attachment; filename="%s"' % f # This one is triggering http://bugs.python.org/issue20747, that is Python # will itself insert a newline in the header h['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"' def test_newlines_in_headers(self): # Bug #10188: Do not allow newlines in headers (CR or LF) r = HttpResponse() with self.assertRaises(BadHeaderError): r.__setitem__('test\rstr', 'test') with self.assertRaises(BadHeaderError): r.__setitem__('test\nstr', 'test') def test_dict_behavior(self): """ Test for bug #14020: Make HttpResponse.get work like dict.get """ r = HttpResponse() self.assertIsNone(r.get('test')) def test_non_string_content(self): # Bug 16494: HttpResponse should behave consistently with non-strings r = HttpResponse(12345) self.assertEqual(r.content, b'12345') # test content via property r = HttpResponse() r.content = 12345 self.assertEqual(r.content, b'12345') def test_iter_content(self): r = HttpResponse(['abc', 'def', 'ghi']) self.assertEqual(r.content, b'abcdefghi') # test iter content via property r = HttpResponse() r.content = ['idan', 'alex', 'jacob'] self.assertEqual(r.content, b'idanalexjacob') r = HttpResponse() r.content = [1, 2, 3] self.assertEqual(r.content, b'123') # test odd inputs r = HttpResponse() r.content = ['1', '2', 3, '\u079e'] # '\xde\x9e' == unichr(1950).encode() self.assertEqual(r.content, b'123\xde\x9e') # .content can safely be accessed multiple times. r = HttpResponse(iter(['hello', 'world'])) self.assertEqual(r.content, r.content) self.assertEqual(r.content, b'helloworld') # __iter__ can safely be called multiple times (#20187). self.assertEqual(b''.join(r), b'helloworld') self.assertEqual(b''.join(r), b'helloworld') # Accessing .content still works. self.assertEqual(r.content, b'helloworld') # Accessing .content also works if the response was iterated first. r = HttpResponse(iter(['hello', 'world'])) self.assertEqual(b''.join(r), b'helloworld') self.assertEqual(r.content, b'helloworld') # Additional content can be written to the response. 
r = HttpResponse(iter(['hello', 'world'])) self.assertEqual(r.content, b'helloworld') r.write('!') self.assertEqual(r.content, b'helloworld!') def test_iterator_isnt_rewound(self): # Regression test for #13222 r = HttpResponse('abc') i = iter(r) self.assertEqual(list(i), [b'abc']) self.assertEqual(list(i), []) def test_lazy_content(self): r = HttpResponse(lazystr('helloworld')) self.assertEqual(r.content, b'helloworld') def test_file_interface(self): r = HttpResponse() r.write(b"hello") self.assertEqual(r.tell(), 5) r.write("привет") self.assertEqual(r.tell(), 17) r = HttpResponse(['abc']) r.write('def') self.assertEqual(r.tell(), 6) self.assertEqual(r.content, b'abcdef') # with Content-Encoding header r = HttpResponse() r['Content-Encoding'] = 'winning' r.write(b'abc') r.write(b'def') self.assertEqual(r.content, b'abcdef') def test_stream_interface(self): r = HttpResponse('asdf') self.assertEqual(r.getvalue(), b'asdf') r = HttpResponse() self.assertIs(r.writable(), True) r.writelines(['foo\n', 'bar\n', 'baz\n']) self.assertEqual(r.content, b'foo\nbar\nbaz\n') def test_unsafe_redirect(self): bad_urls = [ 'data:text/html,<script>window.alert("xss")</script>', 'mailto:[email protected]', 'file:///etc/passwd', ] for url in bad_urls: with self.assertRaises(SuspiciousOperation): HttpResponseRedirect(url) with self.assertRaises(SuspiciousOperation): HttpResponsePermanentRedirect(url) class HttpResponseSubclassesTests(SimpleTestCase): def test_redirect(self): response = HttpResponseRedirect('/redirected/') self.assertEqual(response.status_code, 302) # Standard HttpResponse init args can be used response = HttpResponseRedirect( '/redirected/', content='The resource has temporarily moved', content_type='text/html', ) self.assertContains(response, 'The resource has temporarily moved', status_code=302) self.assertEqual(response.url, response['Location']) def test_redirect_lazy(self): """Make sure HttpResponseRedirect works with lazy strings.""" r = HttpResponseRedirect(lazystr('/redirected/')) self.assertEqual(r.url, '/redirected/') def test_redirect_repr(self): response = HttpResponseRedirect('/redirected/') expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">' self.assertEqual(repr(response), expected) def test_invalid_redirect_repr(self): """ If HttpResponseRedirect raises DisallowedRedirect, its __repr__() should work (in the debug view, for example). 
""" response = HttpResponseRedirect.__new__(HttpResponseRedirect) with self.assertRaisesMessage(DisallowedRedirect, "Unsafe redirect to URL with protocol 'ssh'"): HttpResponseRedirect.__init__(response, 'ssh://foo') expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="ssh://foo">' self.assertEqual(repr(response), expected) def test_not_modified(self): response = HttpResponseNotModified() self.assertEqual(response.status_code, 304) # 304 responses should not have content/content-type with self.assertRaises(AttributeError): response.content = "Hello dear" self.assertNotIn('content-type', response) def test_not_modified_repr(self): response = HttpResponseNotModified() self.assertEqual(repr(response), '<HttpResponseNotModified status_code=304>') def test_not_allowed(self): response = HttpResponseNotAllowed(['GET']) self.assertEqual(response.status_code, 405) # Standard HttpResponse init args can be used response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html') self.assertContains(response, 'Only the GET method is allowed', status_code=405) def test_not_allowed_repr(self): response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain') expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">' self.assertEqual(repr(response), expected) def test_not_allowed_repr_no_content_type(self): response = HttpResponseNotAllowed(('GET', 'POST')) del response['Content-Type'] self.assertEqual(repr(response), '<HttpResponseNotAllowed [GET, POST] status_code=405>') class JsonResponseTests(SimpleTestCase): def test_json_response_non_ascii(self): data = {'key': 'łóżko'} response = JsonResponse(data) self.assertEqual(json.loads(response.content.decode()), data) def test_json_response_raises_type_error_with_default_setting(self): with self.assertRaisesMessage( TypeError, 'In order to allow non-dict objects to be serialized set the ' 'safe parameter to False' ): JsonResponse([1, 2, 3]) def test_json_response_text(self): response = JsonResponse('foobar', safe=False) self.assertEqual(json.loads(response.content.decode()), 'foobar') def test_json_response_list(self): response = JsonResponse(['foo', 'bar'], safe=False) self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar']) def test_json_response_uuid(self): u = uuid.uuid4() response = JsonResponse(u, safe=False) self.assertEqual(json.loads(response.content.decode()), str(u)) def test_json_response_custom_encoder(self): class CustomDjangoJSONEncoder(DjangoJSONEncoder): def encode(self, o): return json.dumps({'foo': 'bar'}) response = JsonResponse({}, encoder=CustomDjangoJSONEncoder) self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'}) def test_json_response_passing_arguments_to_json_dumps(self): response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2}) self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}') class StreamingHttpResponseTests(SimpleTestCase): def test_streaming_response(self): r = StreamingHttpResponse(iter(['hello', 'world'])) # iterating over the response itself yields bytestring chunks. chunks = list(r) self.assertEqual(chunks, [b'hello', b'world']) for chunk in chunks: self.assertIsInstance(chunk, bytes) # and the response can only be iterated once. self.assertEqual(list(r), []) # even when a sequence that can be iterated many times, like a list, # is given as content. 
r = StreamingHttpResponse(['abc', 'def']) self.assertEqual(list(r), [b'abc', b'def']) self.assertEqual(list(r), []) # iterating over strings still yields bytestring chunks. r.streaming_content = iter(['hello', 'café']) chunks = list(r) # '\xc3\xa9' == unichr(233).encode() self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9']) for chunk in chunks: self.assertIsInstance(chunk, bytes) # streaming responses don't have a `content` attribute. self.assertFalse(hasattr(r, 'content')) # and you can't accidentally assign to a `content` attribute. with self.assertRaises(AttributeError): r.content = 'xyz' # but they do have a `streaming_content` attribute. self.assertTrue(hasattr(r, 'streaming_content')) # that exists so we can check if a response is streaming, and wrap or # replace the content iterator. r.streaming_content = iter(['abc', 'def']) r.streaming_content = (chunk.upper() for chunk in r.streaming_content) self.assertEqual(list(r), [b'ABC', b'DEF']) # coercing a streaming response to bytes doesn't return a complete HTTP # message like a regular response does. it only gives us the headers. r = StreamingHttpResponse(iter(['hello', 'world'])) self.assertEqual(bytes(r), b'Content-Type: text/html; charset=utf-8') # and this won't consume its content. self.assertEqual(list(r), [b'hello', b'world']) # additional content cannot be written to the response. r = StreamingHttpResponse(iter(['hello', 'world'])) with self.assertRaises(Exception): r.write('!') # and we can't tell the current position. with self.assertRaises(Exception): r.tell() r = StreamingHttpResponse(iter(['hello', 'world'])) self.assertEqual(r.getvalue(), b'helloworld') class FileCloseTests(SimpleTestCase): def setUp(self): # Disable the request_finished signal during this test # to avoid interfering with the database connection. request_finished.disconnect(close_old_connections) def tearDown(self): request_finished.connect(close_old_connections) def test_response(self): filename = os.path.join(os.path.dirname(__file__), 'abc.txt') # file isn't closed until we close the response. file1 = open(filename) r = HttpResponse(file1) self.assertTrue(file1.closed) r.close() # when multiple file are assigned as content, make sure they are all # closed with the response. file1 = open(filename) file2 = open(filename) r = HttpResponse(file1) r.content = file2 self.assertTrue(file1.closed) self.assertTrue(file2.closed) def test_streaming_response(self): filename = os.path.join(os.path.dirname(__file__), 'abc.txt') # file isn't closed until we close the response. file1 = open(filename) r = StreamingHttpResponse(file1) self.assertFalse(file1.closed) r.close() self.assertTrue(file1.closed) # when multiple file are assigned as content, make sure they are all # closed with the response. 
file1 = open(filename) file2 = open(filename) r = StreamingHttpResponse(file1) r.streaming_content = file2 self.assertFalse(file1.closed) self.assertFalse(file2.closed) r.close() self.assertTrue(file1.closed) self.assertTrue(file2.closed) class CookieTests(unittest.TestCase): def test_encode(self): """Semicolons and commas are encoded.""" c = SimpleCookie() c['test'] = "An,awkward;value" self.assertNotIn(";", c.output().rstrip(';')) # IE compat self.assertNotIn(",", c.output().rstrip(';')) # Safari compat def test_decode(self): """Semicolons and commas are decoded.""" c = SimpleCookie() c['test'] = "An,awkward;value" c2 = SimpleCookie() c2.load(c.output()[12:]) self.assertEqual(c['test'].value, c2['test'].value) c3 = parse_cookie(c.output()[12:]) self.assertEqual(c['test'].value, c3['test']) def test_decode_2(self): c = SimpleCookie() c['test'] = b"\xf0" c2 = SimpleCookie() c2.load(c.output()[12:]) self.assertEqual(c['test'].value, c2['test'].value) c3 = parse_cookie(c.output()[12:]) self.assertEqual(c['test'].value, c3['test']) def test_nonstandard_keys(self): """ A single non-standard cookie name doesn't affect all cookies (#13007). """ self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys()) def test_repeated_nonstandard_keys(self): """ A repeated non-standard name doesn't affect all cookies (#15852). """ self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys()) def test_python_cookies(self): """ Test cases copied from Python's Lib/test/test_http_cookies.py """ self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'}) # Here parse_cookie() differs from Python's cookie parsing in that it # treats all semicolons as delimiters, even within quotes. self.assertEqual( parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'), {'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'} ) # Illegal cookies that have an '=' char in an unquoted value. self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'}) # Cookies with ':' character in their name. self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'}) # Cookies with '[' and ']'. self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'}) def test_cookie_edgecases(self): # Cookies that RFC6265 allows. self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'}) # parse_cookie() has historically kept only the last cookie with the # same name. self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'}) def test_invalid_cookies(self): """ Cookie strings that go against RFC6265 but browsers will send if set via document.cookie. """ # Chunks without an equals sign appear as unnamed values per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys()) # Even a double quote may be an unamed value. self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'}) # Spaces in names and values, and an equals sign in values. self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'}) # More characters the spec forbids. self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'}) # Unicode characters. The spec only allows ASCII. 
self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': 'André Bessette'}) # Browsers don't send extra whitespace or semicolons in Cookie headers, # but parse_cookie() should parse whitespace the same way # document.cookie parses whitespace. self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''}) def test_httponly_after_load(self): c = SimpleCookie() c.load("name=val") c['name']['httponly'] = True self.assertTrue(c['name']['httponly']) def test_load_dict(self): c = SimpleCookie() c.load({'name': 'val'}) self.assertEqual(c['name'].value, 'val') def test_pickle(self): rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1' expected_output = 'Set-Cookie: %s' % rawdata C = SimpleCookie() C.load(rawdata) self.assertEqual(C.output(), expected_output) for proto in range(pickle.HIGHEST_PROTOCOL + 1): C1 = pickle.loads(pickle.dumps(C, protocol=proto)) self.assertEqual(C1.output(), expected_output)
bsd-3-clause
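The StreamingHttpResponse tests above hinge on one behaviour: each chunk pulled from the content iterator is coerced to bytes exactly once, and a single pass exhausts the iterator. Below is a minimal standalone sketch of that coercion for illustration only (an assumption, not Django's actual implementation):

def make_bytes(chunk, charset="utf-8"):
    # str chunks are encoded with the response charset; bytes pass through unchanged.
    return chunk.encode(charset) if isinstance(chunk, str) else bytes(chunk)

streaming_content = iter(["hello", "café"])
chunks = [make_bytes(c) for c in streaming_content]
assert chunks == [b"hello", b"caf\xc3\xa9"]   # '\xc3\xa9' is UTF-8 for é
# The iterator is now exhausted, mirroring assertEqual(list(r), []) above.
assert [make_bytes(c) for c in streaming_content] == []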
Yuliang-Zou/Automatic_Group_Photography_Enhancement
lib/roi_pooling_layer/roi_pooling_op_grad.py
1
1375
import tensorflow as tf from tensorflow.python.framework import ops import roi_pooling_op import pdb @tf.RegisterShape("RoiPool") def _roi_pool_shape(op): """Shape function for the RoiPool op. """ dims_data = op.inputs[0].get_shape().as_list() channels = dims_data[3] dims_rois = op.inputs[1].get_shape().as_list() num_rois = dims_rois[0] pooled_height = op.get_attr('pooled_height') pooled_width = op.get_attr('pooled_width') output_shape = tf.TensorShape([num_rois, pooled_height, pooled_width, channels]) return [output_shape, output_shape] @ops.RegisterGradient("RoiPool") def _roi_pool_grad(op, grad, _): """The gradients for `roi_pool`. Args: op: The `roi_pool` `Operation` that we are differentiating, which we can use to find the inputs and outputs of the original op. grad: Gradient with respect to the output of the `roi_pool` op. Returns: Gradients with respect to the input of `zero_out`. """ data = op.inputs[0] rois = op.inputs[1] argmax = op.outputs[1] pooled_height = op.get_attr('pooled_height') pooled_width = op.get_attr('pooled_width') spatial_scale = op.get_attr('spatial_scale') # compute gradient data_grad = roi_pooling_op.roi_pool_grad(data, rois, argmax, grad, pooled_height, pooled_width, spatial_scale) return [data_grad, None] # List of one Tensor, since we have one input
mit
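A hypothetical usage sketch of the gradient registration above (it assumes TensorFlow 1.x and that the compiled roi_pooling_op extension shipped with this repo is importable; the exact roi_pool signature shown here is an assumption inferred from the attributes read in _roi_pool_grad):

import tensorflow as tf
import roi_pooling_op  # compiled custom-op wrapper from this repo (assumed importable)

data = tf.placeholder(tf.float32, [1, 32, 32, 256])   # conv feature map
rois = tf.placeholder(tf.float32, [4, 5])              # (batch_idx, x1, y1, x2, y2) per ROI
pooled, argmax = roi_pooling_op.roi_pool(
    data, rois, pooled_height=6, pooled_width=6, spatial_scale=1.0 / 16)
# Because _roi_pool_grad is registered for "RoiPool", gradients can flow back
# into the feature map (and None is returned for the ROI input).
data_grad = tf.gradients(pooled, data)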
msdubov/AST-text-analysis
east/main.py
2
6145
# -*- coding: utf-8 -* import getopt import os import sys from east import applications from east import consts from east import formatting from east.synonyms import synonyms from east import relevance from east import utils def main(): args = sys.argv[1:] opts, args = getopt.getopt(args, "s:a:w:v:l:f:c:r:p:dy") opts = dict(opts) # Default values for non-boolean options # Language of the text collection / keyphrases ("english" / "german" / "french" /...) opts.setdefault("-l", consts.Language.ENGLISH) # Relevance measures # Similarity measure to use ("ast" / "cosine") opts.setdefault("-s", consts.RelevanceMeasure.AST) # Algorithm to use for computing ASTs ("easa" / "ast_linear" / "ast_naive") opts.setdefault("-a", consts.ASTAlgorithm.EASA) # Term weighting scheme used for computing the cosine similarity ("tf-idf" / "tf") opts.setdefault("-w", consts.TermWeighting.TF_IDF) # Elements of the vector space for the cosine similarity ("stems" / "lemmata" / "words") opts.setdefault("-v", consts.VectorSpace.STEMS) # Graph construction opts.setdefault("-c", "0.6") # Referral confidence for graph construction opts.setdefault("-r", "0.25") # Relevance threshold of the matching score opts.setdefault("-p", "1") # Support threshold for graph nodes # NOTE(mikhaildubov): Default value of '-f' (output format) depends on the subcommand. if len(args) < 2: print("Invalid syntax: EAST should be called as:\n\n" " east [options] <command> <subcommand> args\n\n" "Commands available: keyphrases.\n" "Subcommands available: table/graph.") return 1 command = args[0] subcommand = args[1] if command == "keyphrases": if len(args) < 4: print('Invalid syntax. For keyphrases analysis, EAST should be called as:\n\n' ' east [options] keyphrases <subcommand> "path/to/keyphrases.txt" ' '"path/to/texts/dir"') return 1 # Keywords keyphrases_file = os.path.abspath(args[2]) with open(keyphrases_file) as f: # NOTE(mikhaildubov): utils.prepare_text() should not be called in clients like this # one; it is already called in the applications module. Note that # the double-calling of this method results in errors. keyphrases = f.read().splitlines() # Text collection (either a directory or a single file) text_collection_path = os.path.abspath(args[3]) if os.path.isdir(text_collection_path): text_files = [os.path.abspath(text_collection_path) + "/" + filename for filename in os.listdir(text_collection_path) if filename.endswith(".txt")] else: # TODO(mikhaildubov): Check that this single file ends with ".txt". text_files = [os.path.abspath(text_collection_path)] texts = {} # NOTE(mikhaildubov): If we have only one text file, we should split the lines. if len(text_files) == 1: with open(text_files[0]) as f: lines = f.read().splitlines() for i in xrange(len(lines)): texts[str(i)] = lines[i] # NOTE(mikhaildubov): If there are multiple text files, read them one-by-one. 
else: for filename in text_files: with open(filename) as f: text_name = os.path.basename(filename).decode("utf-8")[:-4] texts[text_name] = f.read() language = opts["-l"] # Similarity measure similarity_measure = opts["-s"] if similarity_measure == "ast": ast_algorithm = opts["-a"] normalized_scores = "-d" not in opts similarity_measure = relevance.ASTRelevanceMeasure(ast_algorithm, normalized_scores) elif similarity_measure == "cosine": vector_space = opts["-v"] term_weighting = opts["-w"] similarity_measure = relevance.CosineRelevanceMeasure(vector_space, term_weighting) # Synomimizer use_synonyms = "-y" in opts synonimizer = synonyms.SynonymExtractor(text_collection_path) if use_synonyms else None if subcommand == "table": keyphrases_table = applications.keyphrases_table( keyphrases, texts, similarity_measure_factory, synonimizer, language) opts.setdefault("-f", "xml") # Table output format ("csv" is the other option) table_format = opts["-f"].lower() try: res = formatting.format_table(keyphrases_table, table_format) print res except Exception as e: print e return 1 elif subcommand == "graph": # Graph construction parameters: Referral confidence, relevance and support thresholds referral_confidence = float(opts["-c"]) relevance_threshold = float(opts["-r"]) support_threshold = float(opts["-p"]) graph = applications.keyphrases_graph(keyphrases, texts, referral_confidence, relevance_threshold, support_threshold, similarity_measure, synonimizer, language) opts.setdefault("-f", "edges") # Graph output format (also "gml" possible) graph_format = opts["-f"].lower() try: res = formatting.format_graph(graph, graph_format) print res except Exception as e: print e return 1 else: print "Invalid subcommand: '%s'. Please use one of: 'table', 'graph'." % subcommand return 1 else: print "Invalid command: '%s'. Please use one of: 'keyphrases'." % command return 1 if __name__ == "__main__": main()
mit
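The option handling in east/main.py above relies on a small getopt idiom: parse the flags into a dict, then fill in missing keys with setdefault(). A self-contained sketch of that pattern with made-up argument values:

import getopt

argv = ["-s", "cosine", "keyphrases", "table", "keyphrases.txt", "texts/"]
opts, args = getopt.getopt(argv, "s:a:w:v:l:f:c:r:p:dy")
opts = dict(opts)
opts.setdefault("-s", "ast")    # already supplied, keeps "cosine"
opts.setdefault("-a", "easa")   # not supplied, falls back to the default
assert opts == {"-s": "cosine", "-a": "easa"}
assert args == ["keyphrases", "table", "keyphrases.txt", "texts/"]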
reasonerjt/harbor
make/photon/prepare/commands/gencerts.py
3
1188
import os import sys import click import pathlib import logging from subprocess import Popen, PIPE, STDOUT, CalledProcessError from utils.cert import openssl_installed from utils.misc import get_realpath gen_tls_script = pathlib.Path(__file__).parent.parent.joinpath('scripts/gencert.sh').absolute() @click.command() @click.option('-p', '--path', required=True, type=str,help='the path to store generated cert files') @click.option('-d', '--days', default='365', type=str, help='the expired time for cert') def gencert(path, days): """ gencert command will generate cert files for internal TLS """ path = get_realpath(path) click.echo('Check openssl ...') if not openssl_installed(): raise(Exception('openssl not installed')) click.echo("start generate internal tls certs") if not os.path.exists(path): click.echo('path {} not exist, create it...'.format(path)) os.makedirs(path, exist_ok=True) with Popen([gen_tls_script, days], stdout=PIPE, stderr=STDOUT, cwd=path) as p: for line in p.stdout: click.echo(line, nl=False) if p.returncode != 0: raise CalledProcessError(p.returncode, p.args)
apache-2.0
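A hedged sketch of how the click command above can be exercised in-process; the import path is an assumption based on the file's location, and a working openssl plus the gencert.sh script are required for the command to actually succeed:

from click.testing import CliRunner
from commands.gencerts import gencert  # import path is an assumption

runner = CliRunner()
result = runner.invoke(gencert, ["--path", "/tmp/harbor-internal-tls", "--days", "730"])
# invoke() captures output and exceptions instead of letting them propagate.
print(result.exit_code)
print(result.output)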
kennedyshead/home-assistant
tests/components/mikrotik/test_init.py
8
3080
"""Test Mikrotik setup process.""" from unittest.mock import AsyncMock, Mock, patch from homeassistant.components import mikrotik from homeassistant.setup import async_setup_component from . import MOCK_DATA from tests.common import MockConfigEntry async def test_setup_with_no_config(hass): """Test that we do not discover anything or try to set up a hub.""" assert await async_setup_component(hass, mikrotik.DOMAIN, {}) is True assert mikrotik.DOMAIN not in hass.data async def test_successful_config_entry(hass): """Test config entry successful setup.""" entry = MockConfigEntry( domain=mikrotik.DOMAIN, data=MOCK_DATA, ) entry.add_to_hass(hass) mock_registry = Mock() with patch.object(mikrotik, "MikrotikHub") as mock_hub, patch( "homeassistant.helpers.device_registry.async_get_registry", return_value=mock_registry, ): mock_hub.return_value.async_setup = AsyncMock(return_value=True) mock_hub.return_value.serial_num = "12345678" mock_hub.return_value.model = "RB750" mock_hub.return_value.hostname = "mikrotik" mock_hub.return_value.firmware = "3.65" assert await mikrotik.async_setup_entry(hass, entry) is True assert len(mock_hub.mock_calls) == 2 p_hass, p_entry = mock_hub.mock_calls[0][1] assert p_hass is hass assert p_entry is entry assert len(mock_registry.mock_calls) == 1 assert mock_registry.mock_calls[0][2] == { "config_entry_id": entry.entry_id, "connections": {("mikrotik", "12345678")}, "manufacturer": mikrotik.ATTR_MANUFACTURER, "model": "RB750", "name": "mikrotik", "sw_version": "3.65", } async def test_hub_fail_setup(hass): """Test that a failed setup will not store the hub.""" entry = MockConfigEntry( domain=mikrotik.DOMAIN, data=MOCK_DATA, ) entry.add_to_hass(hass) with patch.object(mikrotik, "MikrotikHub") as mock_hub: mock_hub.return_value.async_setup = AsyncMock(return_value=False) assert await mikrotik.async_setup_entry(hass, entry) is False assert mikrotik.DOMAIN not in hass.data async def test_unload_entry(hass): """Test being able to unload an entry.""" entry = MockConfigEntry( domain=mikrotik.DOMAIN, data=MOCK_DATA, ) entry.add_to_hass(hass) with patch.object(mikrotik, "MikrotikHub") as mock_hub, patch( "homeassistant.helpers.device_registry.async_get_registry", return_value=Mock(), ): mock_hub.return_value.async_setup = AsyncMock(return_value=True) mock_hub.return_value.serial_num = "12345678" mock_hub.return_value.model = "RB750" mock_hub.return_value.hostname = "mikrotik" mock_hub.return_value.firmware = "3.65" assert await mikrotik.async_setup_entry(hass, entry) is True assert len(mock_hub.return_value.mock_calls) == 1 assert await mikrotik.async_unload_entry(hass, entry) assert entry.entry_id not in hass.data[mikrotik.DOMAIN]
apache-2.0
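The Mikrotik tests above lean on unittest.mock.AsyncMock so that the patched hub's async_setup coroutine can be awaited. A standalone illustration of that pattern (Python 3.8+):

import asyncio
from unittest.mock import AsyncMock, Mock

hub = Mock()
hub.async_setup = AsyncMock(return_value=True)

async def main():
    assert await hub.async_setup() is True      # awaitable, unlike a plain Mock attribute
    hub.async_setup.assert_awaited_once()

asyncio.run(main())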
julien-hadleyjack/genrss-py
src/genrss/podcast.py
1
5482
#!/usr/bin/env python # -*- coding: utf-8 -*- from future.standard_library import install_aliases install_aliases() import os import re from operator import attrgetter from urllib.request import urlretrieve from io import open from PIL import Image from jinja2 import Environment, FileSystemLoader, StrictUndefined from . import CONFIG, get_logger, PATH class PodcastManager(object): def __init__(self): self.podcasts = {} self.fallback = Podcast(CONFIG["fallback"]["title"], is_collection=True) def get_all_podcasts(self): return list(self.podcasts.values()) + [self.fallback] def add_episode(self, episode): """ :param episode: :type episode: episode.Episode """ if episode.show not in CONFIG["shows"]: podcast = self.fallback elif episode.show not in self.podcasts: podcast = Podcast(episode.show) self.podcasts[episode.show] = podcast else: podcast = self.podcasts[episode.show] if not CONFIG["technical"]["check-episode"] or episode: podcast.episodes.append(episode) def generate_html(self): env = Environment(loader=FileSystemLoader(os.path.join(PATH, 'template')), autoescape=True, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined) template = env.get_template("index.html") output = template.render(config=CONFIG, manager=self) file_path = os.path.join(CONFIG["file-base"], CONFIG["technical"]["overview-path"]) with open(file_path, "w", encoding="utf8") as file: get_logger().info("Writing HTML overview at %s", file_path) file.write(output) def generate_rss(self): for podcast in self.get_all_podcasts(): podcast.save() class Podcast(): def __init__(self, title, short_description=None, html_description=None, is_collection=False): self.title = title self.episodes = [] self.is_collection = is_collection self.short_description = short_description or CONFIG["fallback"]["short-description"] self.html_description = html_description or CONFIG["fallback"]["html-description"] get_logger().debug("Creating podcast:\n\t%s", repr(self)) @staticmethod def format_date(dt): return dt.strftime("%a, %d %b %Y %H:%M:%S +0100") def image_url(self): image_url = CONFIG["fallback"]["image-url"] if not self.is_collection and len(self.episodes) > 0: image_location = None image_name = CONFIG["technical"]["image-name"] for episode in self.episodes: location = os.path.join(episode.directory_path, image_name) if os.path.exists(location): image_location = location image_url = episode.sub_directory + image_name break if not image_location: for episode in self.episodes: if os.path.exists(episode.directory_path): image_location = os.path.join(episode.directory_path, image_name) urlretrieve(episode.thumbnail, image_location) image_url = episode.sub_directory + image_name break if image_location: self.crop_image(image_location) return CONFIG["url-base"] + image_url @staticmethod def crop_image(image_location): # http://www.carlbednorz.de/python-create-square-thumbnails-from-images-with-pil/ img = Image.open(image_location) width, height = img.size if width != height: upper_x = int((width / 2) - (height / 2)) upper_y = 0 lower_x = int((width / 2) + (height / 2)) lower_y = height img = img.crop((upper_x, upper_y, lower_x, lower_y)) assert img.size[0] == img.size[1] get_logger().debug("Saving a new thumbnail at %s", image_location) img.save(image_location, "JPEG") def get_rss_filename(self): if not self.episodes: get_logger().info("No episodes found for %s. 
No rss file name.", self.title) elif self.is_collection: return CONFIG["fallback"]["rss-file"] else: return re.sub("[^a-zA-Z0-9_\-\./]+", "_", self.title) + ".rss" def save(self): if not self.episodes: get_logger().info("No episodes found for %s. Can't save rss feed", self.title) return sorted_episodes = sorted(self.episodes, key=attrgetter('time_added'), reverse=True) env = Environment(loader=FileSystemLoader(os.path.join(PATH, 'template')), autoescape=True, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined) template = env.get_template("feed.rss") output = template.render(config=CONFIG, sorted_episodes=sorted_episodes, podcast=self) file_path = os.path.join(CONFIG["file-base"], self.get_rss_filename()) with open(file_path, "w", encoding="utf8") as file: get_logger().info("Saving %d episodes at %s.", len(self.episodes), file_path) file.write(output) def __repr__(self): return "Podcast[title={self.title}, episodes={amount}]".format(amount=len(self.episodes), **locals())
bsd-2-clause
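A quick check of the square-crop arithmetic in Podcast.crop_image above: for a landscape image the crop box is centred horizontally and spans the full height, so the saved thumbnail is height by height.

width, height = 1280, 720
upper_x = int((width / 2) - (height / 2))
lower_x = int((width / 2) + (height / 2))
box = (upper_x, 0, lower_x, height)
assert box == (280, 0, 1000, 720)
assert lower_x - upper_x == height            # the crop is square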
Spleen64/Sick-Beard
lib/subliminal/services/subswiki.py
35
5235
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <[email protected]> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..exceptions import ServiceError from ..language import language_set, Language from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import get_keywords, split_keyword from ..videos import Episode, Movie from bs4 import BeautifulSoup import logging import urllib logger = logging.getLogger("subliminal") class SubsWiki(ServiceBase): server_url = 'http://www.subswiki.com' site_url = 'http://www.subswiki.com' api_based = False languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'pob', 'por', 'spa-ES', u'spa', u'ita', u'cat']) language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'), u'Català': Language('cat'), u'Brazilian': Language('pob'), u'English (US)': Language('eng-US'), u'English (UK)': Language('eng-GB')} language_code = 'name' videos = [Episode, Movie] require_video = False #required_features = ['permissive'] def list_checked(self, video, languages): results = [] if isinstance(video, Episode): results = self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode) elif isinstance(video, Movie) and video.year: results = self.query(video.path or video.release, languages, get_keywords(video.guess), movie=video.title, year=video.year) return results def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None, movie=None, year=None): if series and season and episode: request_series = series.lower().replace(' ', '_') if isinstance(request_series, unicode): request_series = request_series.encode('utf-8') logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) r = self.session.get('%s/serie/%s/%s/%s/' % (self.server_url, urllib.quote(request_series), season, episode)) if r.status_code == 404: logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) return [] elif movie and year: request_movie = movie.title().replace(' ', '_') if isinstance(request_movie, unicode): request_movie = request_movie.encode('utf-8') logger.debug(u'Getting subtitles for %s (%d) with languages %r' % (movie, year, languages)) r = self.session.get('%s/film/%s_(%d)' % (self.server_url, urllib.quote(request_movie), year)) if r.status_code == 404: logger.debug(u'Could not find subtitles for %s (%d) with languages %r' % (movie, year, languages)) return [] else: raise ServiceError('One or more parameter missing') if r.status_code != 200: logger.error(u'Request %s returned status code %d' % (r.url, r.status_code)) return [] soup = BeautifulSoup(r.content, self.required_features) subtitles = [] for sub in 
soup('td', {'class': 'NewsTitle'}): sub_keywords = split_keyword(sub.b.string.lower()) if keywords and not keywords & sub_keywords: logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords)) continue for html_language in sub.parent.parent.find_all('td', {'class': 'language'}): language = self.get_language(html_language.string.strip()) if language not in languages: logger.debug(u'Language %r not in wanted languages %r' % (language, languages)) continue html_status = html_language.find_next_sibling('td') status = html_status.strong.string.strip() if status != 'Completado': logger.debug(u'Wrong subtitle status %s' % status) continue path = get_subtitle_path(filepath, language, self.config.multi) subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s%s' % (self.server_url, html_status.find_next('td').find('a')['href'])) subtitles.append(subtitle) return subtitles Service = SubsWiki
gpl-3.0
ClearCorp-dev/account-financial-reporting
account_move_line_report_xls/report/move_line_list_xls.py
25
17494
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import xlwt from datetime import datetime from openerp.osv import orm from openerp.report import report_sxw from openerp.addons.report_xls.report_xls import report_xls from openerp.addons.report_xls.utils import rowcol_to_cell, _render from openerp.tools.translate import translate, _ import logging _logger = logging.getLogger(__name__) _ir_translation_name = 'move.line.list.xls' class move_line_xls_parser(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(move_line_xls_parser, self).__init__( cr, uid, name, context=context) move_obj = self.pool.get('account.move.line') self.context = context wanted_list = move_obj._report_xls_fields(cr, uid, context) template_changes = move_obj._report_xls_template(cr, uid, context) self.localcontext.update({ 'datetime': datetime, 'wanted_list': wanted_list, 'template_changes': template_changes, '_': self._, }) def _(self, src): lang = self.context.get('lang', 'en_US') return translate(self.cr, _ir_translation_name, 'report', lang, src) \ or src class move_line_xls(report_xls): def __init__(self, name, table, rml=False, parser=False, header=True, store=False): super(move_line_xls, self).__init__( name, table, rml, parser, header, store) # Cell Styles _xs = self.xls_styles # header rh_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all'] self.rh_cell_style = xlwt.easyxf(rh_cell_format) self.rh_cell_style_center = xlwt.easyxf(rh_cell_format + _xs['center']) self.rh_cell_style_right = xlwt.easyxf(rh_cell_format + _xs['right']) # lines aml_cell_format = _xs['borders_all'] self.aml_cell_style = xlwt.easyxf(aml_cell_format) self.aml_cell_style_center = xlwt.easyxf( aml_cell_format + _xs['center']) self.aml_cell_style_date = xlwt.easyxf( aml_cell_format + _xs['left'], num_format_str=report_xls.date_format) self.aml_cell_style_decimal = xlwt.easyxf( aml_cell_format + _xs['right'], num_format_str=report_xls.decimal_format) # totals rt_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all'] self.rt_cell_style = xlwt.easyxf(rt_cell_format) self.rt_cell_style_right = xlwt.easyxf(rt_cell_format + _xs['right']) self.rt_cell_style_decimal = xlwt.easyxf( rt_cell_format + _xs['right'], num_format_str=report_xls.decimal_format) # XLS Template self.col_specs_template = { 'move': { 'header': [1, 20, 'text', _render("_('Entry')")], 'lines': [1, 0, 'text', _render("line.move_id.name or ''")], 'totals': [1, 0, 'text', None]}, 'name': { 'header': [1, 42, 'text', _render("_('Name')")], 'lines': [1, 0, 'text', _render("line.name or ''")], 'totals': [1, 0, 'text', None]}, 'ref': { 'header': [1, 42, 'text', 
_render("_('Reference')")], 'lines': [1, 0, 'text', _render("line.ref or ''")], 'totals': [1, 0, 'text', None]}, 'date': { 'header': [1, 13, 'text', _render("_('Effective Date')")], 'lines': [1, 0, 'date', _render("datetime.strptime(line.date,'%Y-%m-%d')"), None, self.aml_cell_style_date], 'totals': [1, 0, 'text', None]}, 'period': { 'header': [1, 12, 'text', _render("_('Period')")], 'lines': [1, 0, 'text', _render("line.period_id.code or line.period_id.name")], 'totals': [1, 0, 'text', None]}, 'partner': { 'header': [1, 36, 'text', _render("_('Partner')")], 'lines': [1, 0, 'text', _render("line.partner_id and line.partner_id.name or ''")], 'totals': [1, 0, 'text', None]}, 'partner_ref': { 'header': [1, 36, 'text', _render("_('Partner Reference')")], 'lines': [1, 0, 'text', _render("line.partner_id and line.partner_id.ref or ''")], 'totals': [1, 0, 'text', None]}, 'account': { 'header': [1, 12, 'text', _render("_('Account')")], 'lines': [1, 0, 'text', _render("line.account_id.code")], 'totals': [1, 0, 'text', None]}, 'date_maturity': { 'header': [1, 13, 'text', _render("_('Maturity Date')")], 'lines': [1, 0, _render("line.date_maturity and 'date' or 'text'"), _render( "line.date_maturity" " and datetime.strptime(line.date_maturity,'%Y-%m-%d')" " or None"), None, self.aml_cell_style_date], 'totals': [1, 0, 'text', None]}, 'debit': { 'header': [1, 18, 'text', _render("_('Debit')"), None, self.rh_cell_style_right], 'lines': [1, 0, 'number', _render("line.debit"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'number', None, _render("debit_formula"), self.rt_cell_style_decimal]}, 'credit': { 'header': [1, 18, 'text', _render("_('Credit')"), None, self.rh_cell_style_right], 'lines': [1, 0, 'number', _render("line.credit"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'number', None, _render("credit_formula"), self.rt_cell_style_decimal]}, 'balance': { 'header': [1, 18, 'text', _render("_('Balance')"), None, self.rh_cell_style_right], 'lines': [1, 0, 'number', None, _render("bal_formula"), self.aml_cell_style_decimal], 'totals': [1, 0, 'number', None, _render("bal_formula"), self.rt_cell_style_decimal]}, 'reconcile': { 'header': [1, 12, 'text', _render("_('Rec.')"), None, self.rh_cell_style_center], 'lines': [1, 0, 'text', _render("line.reconcile_id.name or ''"), None, self.aml_cell_style_center], 'totals': [1, 0, 'text', None]}, 'reconcile_partial': { 'header': [1, 12, 'text', _render("_('Part. Rec.')"), None, self.rh_cell_style_center], 'lines': [1, 0, 'text', _render("line.reconcile_partial_id.name or ''"), None, self.aml_cell_style_center], 'totals': [1, 0, 'text', None]}, 'tax_code': { 'header': [1, 12, 'text', _render("_('Tax Code')"), None, self.rh_cell_style_center], 'lines': [1, 0, 'text', _render("line.tax_code_id.code or ''"), None, self.aml_cell_style_center], 'totals': [1, 0, 'text', None]}, 'tax_amount': { 'header': [1, 18, 'text', _render("_('Tax/Base Amount')"), None, self.rh_cell_style_right], 'lines': [1, 0, 'number', _render("line.tax_amount"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'text', None]}, 'amount_currency': { 'header': [1, 18, 'text', _render("_('Am. 
Currency')"), None, self.rh_cell_style_right], 'lines': [1, 0, _render("line.amount_currency and 'number' or 'text'"), _render("line.amount_currency or None"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'text', None]}, 'currency_name': { 'header': [1, 6, 'text', _render("_('Curr.')"), None, self.rh_cell_style_center], 'lines': [1, 0, 'text', _render("line.currency_id and line.currency_id.name or ''"), None, self.aml_cell_style_center], 'totals': [1, 0, 'text', None]}, 'journal': { 'header': [1, 12, 'text', _render("_('Journal')")], 'lines': [1, 0, 'text', _render("line.journal_id.code or ''")], 'totals': [1, 0, 'text', None]}, 'company_currency': { 'header': [1, 10, 'text', _render("_('Comp. Curr.')")], 'lines': [1, 0, 'text', _render("line.company_id.currency_id.name or ''"), None, self.aml_cell_style_center], 'totals': [1, 0, 'text', None]}, 'analytic_account': { 'header': [1, 36, 'text', _render("_('Analytic Account')")], 'lines': [1, 0, 'text', _render("line.analytic_account_id.code or ''")], 'totals': [1, 0, 'text', None]}, 'product': { 'header': [1, 36, 'text', _render("_('Product')")], 'lines': [1, 0, 'text', _render("line.product_id.name or ''")], 'totals': [1, 0, 'text', None]}, 'product_ref': { 'header': [1, 36, 'text', _render("_('Product Reference')")], 'lines': [1, 0, 'text', _render("line.product_id.default_code or ''")], 'totals': [1, 0, 'text', None]}, 'product_uom': { 'header': [1, 20, 'text', _render("_('Unit of Measure')")], 'lines': [1, 0, 'text', _render("line.product_uom_id.name or ''")], 'totals': [1, 0, 'text', None]}, 'quantity': { 'header': [1, 8, 'text', _render("_('Qty')"), None, self.rh_cell_style_right], 'lines': [1, 0, _render("line.quantity and 'number' or 'text'"), _render("line.quantity or None"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'text', None]}, 'statement': { 'header': [1, 20, 'text', _render("_('Statement')")], 'lines': [1, 0, 'text', _render("line.statement_id and line.statement_id.name or ''") ], 'totals': [1, 0, 'text', None]}, 'invoice': { 'header': [1, 20, 'text', _render("_('Invoice')")], 'lines': [1, 0, 'text', _render("line.invoice and line.invoice.number or ''")], 'totals': [1, 0, 'text', None]}, 'amount_residual': { 'header': [1, 18, 'text', _render("_('Residual Amount')"), None, self.rh_cell_style_right], 'lines': [1, 0, _render("line.amount_residual and 'number' or 'text'"), _render("line.amount_residual or None"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'text', None]}, 'amount_residual_currency': { 'header': [1, 18, 'text', _render("_('Res. Am. 
in Curr.')"), None, self.rh_cell_style_right], 'lines': [1, 0, _render( "line.amount_residual_currency and 'number' or 'text'"), _render("line.amount_residual_currency or None"), None, self.aml_cell_style_decimal], 'totals': [1, 0, 'text', None]}, 'narration': { 'header': [1, 42, 'text', _render("_('Notes')")], 'lines': [1, 0, 'text', _render("line.move_id.narration or ''")], 'totals': [1, 0, 'text', None]}, 'blocked': { 'header': [1, 4, 'text', _('Lit.'), None, self.rh_cell_style_right], 'lines': [1, 0, 'text', _render("line.blocked and 'x' or ''"), None, self.aml_cell_style_center], 'totals': [1, 0, 'text', None]}, } def generate_xls_report(self, _p, _xs, data, objects, wb): wanted_list = _p.wanted_list self.col_specs_template.update(_p.template_changes) _ = _p._ debit_pos = 'debit' in wanted_list and wanted_list.index('debit') credit_pos = 'credit' in wanted_list and wanted_list.index('credit') if not (credit_pos and debit_pos) and 'balance' in wanted_list: raise orm.except_orm( _('Customisation Error!'), _("The 'Balance' field is a calculated XLS field requiring \ the presence of the 'Debit' and 'Credit' fields !")) # report_name = objects[0]._description or objects[0]._name report_name = _("Journal Items") ws = wb.add_sheet(report_name[:31]) ws.panes_frozen = True ws.remove_splits = True ws.portrait = 0 # Landscape ws.fit_width_to_pages = 1 row_pos = 0 # set print header/footer ws.header_str = self.xls_headers['standard'] ws.footer_str = self.xls_footers['standard'] # Title cell_style = xlwt.easyxf(_xs['xls_title']) c_specs = [ ('report_name', 1, 0, 'text', report_name), ] row_data = self.xls_row_template(c_specs, ['report_name']) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=cell_style) row_pos += 1 # Column headers c_specs = map(lambda x: self.render( x, self.col_specs_template, 'header', render_space={'_': _p._}), wanted_list) row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs]) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=self.rh_cell_style, set_column_size=True) ws.set_horz_split_pos(row_pos) # account move lines for line in objects: debit_cell = rowcol_to_cell(row_pos, debit_pos) credit_cell = rowcol_to_cell(row_pos, credit_pos) bal_formula = debit_cell + '-' + credit_cell _logger.debug('dummy call - %s', bal_formula) c_specs = map( lambda x: self.render(x, self.col_specs_template, 'lines'), wanted_list) row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs]) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=self.aml_cell_style) # Totals aml_cnt = len(objects) debit_start = rowcol_to_cell(row_pos - aml_cnt, debit_pos) debit_stop = rowcol_to_cell(row_pos - 1, debit_pos) debit_formula = 'SUM(%s:%s)' % (debit_start, debit_stop) _logger.debug('dummy call - %s', debit_formula) credit_start = rowcol_to_cell(row_pos - aml_cnt, credit_pos) credit_stop = rowcol_to_cell(row_pos - 1, credit_pos) credit_formula = 'SUM(%s:%s)' % (credit_start, credit_stop) _logger.debug('dummy call - %s', credit_formula) debit_cell = rowcol_to_cell(row_pos, debit_pos) credit_cell = rowcol_to_cell(row_pos, credit_pos) bal_formula = debit_cell + '-' + credit_cell _logger.debug('dummy call - %s', bal_formula) c_specs = map( lambda x: self.render(x, self.col_specs_template, 'totals'), wanted_list) row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs]) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=self.rt_cell_style_right) move_line_xls('report.move.line.list.xls', 'account.move.line', parser=move_line_xls_parser)
agpl-3.0
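The XLS report above builds its Excel SUM() and balance formulas from A1-style cell references produced by rowcol_to_cell (imported from report_xls.utils). The helper below is only an illustration of that conversion, not the report_xls implementation:

def rowcol_to_cell(row, col):
    # zero-based (row, col) -> A1-style reference, e.g. (0, 0) -> "A1"
    letters = ""
    col += 1
    while col:
        col, rem = divmod(col - 1, 26)
        letters = chr(ord("A") + rem) + letters
    return "%s%d" % (letters, row + 1)

assert rowcol_to_cell(0, 0) == "A1"
assert rowcol_to_cell(9, 27) == "AB10"
# e.g. a debit column at index 1 summed over rows 2..11 yields "SUM(B2:B11)"
assert "SUM(%s:%s)" % (rowcol_to_cell(1, 1), rowcol_to_cell(10, 1)) == "SUM(B2:B11)"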
t27/ol3
bin/check-whitespace.py
5
1582
import logging import re import sys logging.basicConfig(format='%(asctime)s %(name)s: %(message)s', level=logging.INFO) logger = logging.getLogger('check-whitespace') CR_RE = re.compile(r'\r') LEADING_WHITESPACE_RE = re.compile(r'\s+') TRAILING_WHITESPACE_RE = re.compile(r'\s+\n\Z') NO_NEWLINE_RE = re.compile(r'[^\n]\Z') ALL_WHITESPACE_RE = re.compile(r'\s+\Z') def check_whitespace(*filenames): errors = 0 for filename in sorted(filenames): whitespace = False for lineno, line in enumerate(open(filename, 'rU')): if lineno == 0 and LEADING_WHITESPACE_RE.match(line): logger.info('%s:%d: leading whitespace', filename, lineno + 1) errors += 1 if CR_RE.search(line): logger.info('%s:%d: carriage return character in line', filename, lineno + 1) errors += 1 if TRAILING_WHITESPACE_RE.search(line): logger.info('%s:%d: trailing whitespace', filename, lineno + 1) errors += 1 if NO_NEWLINE_RE.search(line): logger.info('%s:%d: no newline at end of file', filename, lineno + 1) errors += 1 whitespace = ALL_WHITESPACE_RE.match(line) if whitespace: logger.info('%s: trailing whitespace at end of file', filename) errors += 1 if errors: logger.error('%d whitespace errors' % (errors,)) if __name__ == "__main__": check_whitespace(*sys.argv[1:])
bsd-2-clause
JayvicWen/Crawler
kaoyan/crawl_post.py
2
2301
#!/usr/bin/env python # encoding:utf-8 import os import sys import requests import MySQLdb from bs4 import BeautifulSoup from bs4 import SoupStrainer from config import * base_url = 'http://download.kaoyan.com' status = [] def get_soup(url, parse_only=None): content = requests.get(url).content return BeautifulSoup(content, 'lxml', parse_only=parse_only) def mysql_connect(): global connection connection = MySQLdb.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB_DATABASE, port=3306, charset='utf8') def mysql_disconnect(): global connection connection.commit() connection.close() def crawl_post(url): status.append(url) global connection cursor = connection.cursor() cursor.execute( 'INSERT INTO `kaoyan_post`(`type`, `list_url`, `post_url`) VALUES (%s, %s, %s)', status ) cursor.close() status.pop() def crawl_list(list_id): soup = get_soup(base_url + '/list-%d' % list_id) thread_list = soup.find('div', attrs={'class': 'threadlist'}) if thread_list is None: print 'List not exists:', base_url + '/list-%d' % list_id return user_info_list = soup.find('div', attrs={'class': 'userinfolist'}) status.append('-'.join(user_info_list.span.get_text().split(u' » ')[2:])) url = base_url + '/list-%d' % list_id while url is not None: print 'Crawing list:', url status.append(url) soup = get_soup(url) table_dom = soup.find('div', attrs={'class': 'threadlist'}).table post_list_dom = table_dom.find_all('a') mysql_connect() for post_dom in post_list_dom: crawl_post(base_url + post_dom['href']) mysql_disconnect() status.pop() pages_dom = soup.find('div', {'class': 'pages'}) if pages_dom is None: break next_dom = pages_dom.find('a', {'class': 'next'}) if next_dom is None: break url = base_url + next_dom['href'] status.pop() if __name__ == '__main__': if len(sys.argv) != 3: print 'Invalid parameters!' exit(1) print '=' * 60 print 'start:', sys.argv for i in xrange(int(sys.argv[1]), int(sys.argv[2]) + 1): crawl_list(i)
mit
vnc-biz/pyzimbra
pyzimbra/z/admin.py
3
2605
# -*- coding: utf-8 -*- """ ################################################################################ # Copyright (c) 2010, Ilgar Mashayev # # E-mail: [email protected] # Website: http://github.com/ilgarm/pyzimbra ################################################################################ # This file is part of pyzimbra. # # Pyzimbra is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyzimbra is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Pyzimbra. If not, see <http://www.gnu.org/licenses/>. ################################################################################ Zimbra privileged client. @author: ilgar """ from pyzimbra import sconstant, zconstant from pyzimbra.zclient import ZimbraSoapClient class ZimbraAdmin(ZimbraSoapClient): """ Zimbra non-privileged client. """ # ------------------------------------------------------------------ unbound def authenticate(self, account_name, password): """ Authenticates zimbra account. @param account_name: account email address @param password: account password @raise AuthException: if authentication fails @raise SoapException: if soap communication fails """ self.auth_token = self.authenticator.authenticate_admin(self.transport, account_name, password) def get_account(self): """ Gets account. @return: Account """ def change_password(self, account, password): """ Changes account password. @param account: account to change password for @param password: new password """ def get_info(self, account, params={}): """ Gets account info. @param account: account to get info for @param params: parameters to retrieve @return: AccountInfo """ res = self.invoke(zconstant.NS_ZIMBRA_ADMIN_URL, sconstant.GetInfoRequest, params) return res
lgpl-3.0
arummler/eudaq
legacy/producers/palpidess/scripts/slow_control/config_pALPIDE_driver.py
11
3952
#! /usr/bin/env python ## ## standard configuration of the FEC HLVDS for the use ## with the pALPIDE with the Padua Proximity board V1 ## import sys import os import SlowControl # slow control code import biasDAC # special code to setup up the voltage biases m = SlowControl.SlowControl(0) # HLVDS FEC (master) # was the integration time given as a commandline argument? if len(sys.argv) >= 2: integration_time = int(float(sys.argv[1])/0.00625) # Acquisition time is expected to specified in micro seconds (us) else: integration_time = 0xf600 # default value if len(sys.argv) >= 3: trigger_delay = int(float(sys.argv[2])/0.00625) # Acquisition time is expected to specified in micro seconds (us) else: trigger_delay = 0 # default value # 0x16 (22) readout control # # The bits (2:0) are not set during configuration, as they are set during the # (start-up of a) measurement. # # 3: driver loop mode (activates the complete sequence although no data is sent) # 2: single-event readout enable # 1: single-event readout request (rising edge sensitive, need to toggle!) # 0: continuous readout enable rdo_settings = 0x0 # 0x18 (24) trigger control # # 18-12: tlu wait cycles (default = 0x0c << 4) # 10- 4: tlu clock div (default = 0x00 << 12) # 3: driver busy enable ( 0x8 ) # 2: tlu reset enable ( 0x4 ) # 1- 0: trig mode (0b00 = auto/continuously, 0b01 = NIM in, # 0b10 = TLU triggering, 0b11 valid based) trg_settings = 0x1 | (0x0c << 4) | (0x00 << 12) ### FEC HLVDS configuration registers # all times/delays are specified in multiples of 6.25 ns values_HLVDS = [ # explorer driver configuration 0b1100, # 0x00 ( 0) enable digital pulsing (0), clock init state (1), # repeat global reset (2), activate mem_wr_en (3) 0x10, # 0x01 ( 1) length of GRSTb signal [init_dly_t] 0xa0, # 0x02 ( 2) length of the analog pulser reference pulse [dly_t] trigger_delay, # 0x03 ( 3) pre-acqusition delay (in between trigger and acqs start) # [pre_acq_dly_t] integration_time, # 0x04 ( 4) integration time [acq_time_t] 0x0, # 0x05 ( 5) delay in between acquisition and readout [rdo_dly_t] 0xb, # 0x06 ( 6) transport delay of the signals from SRS -> pALPIDE -> SRS # [transport_dly_t] 0x1, # 0x07 ( 7) clock divider for the output clock [clk_div_t] 0x3, # 0x08 ( 8) readout frequency divider [rdo_div_t] 0x0, # 0x09 ( 9) post-event delay [post_evt_dly_t] 0b0111111100000, # 0x0a (10) global reset settings by state (0 = active, 1 = inactive) # .*=-.*=-.*=-. # stReset (0), stInit (1), stDly (2), stTrigWait (3), stPreAcq (4), # stAcq (5), stAcqWait (6), stRdoWait (7), stRdoStart (8), # stRdoFirst (9), stRdoPause (10), stRdo (11), stPostEvtDly (12) 0x2, # 0x0b (11) nim_out signal assignment (0 = off, 1 = on) # a_pulse_ref (0), combined_busy (1) 0x0, # 0x0c (12) 0x0, # 0x0d (13) 0x0, # 0x0e (14) 0x0, # 0x0f (15) 0x0, # 0x10 (16) 0x0, # 0x11 (17) 0x0, # 0x12 (18) 0x0, # 0x13 (19) 0x0, # 0x14 (20) 0x0, # 0x15 (21) # general configuration rdo_settings, # 0x16 (22) readout control 8832, # 0x17 (23) maximum frame size trg_settings # 0x18 (24) trigger control ] # all higher addresses are read-only SlowControl.write_burst(m, 6039, 0x0, values_HLVDS, False) biasDAC.set_bias_voltage(12, 1.6, m) # Vreset biasDAC.set_bias_voltage(8, 0.4, m) # VCASN biasDAC.set_bias_voltage(10, 0.6, m) # VCASP quit()
lgpl-3.0
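A worked check of the time conversion used in the configuration script above: command-line times are given in microseconds and stored as multiples of the 6.25 ns register clock, so 1 us corresponds to 160 cycles and the default integration time of 0xf600 cycles is roughly 393.6 us.

CLOCK_US = 0.00625                        # 6.25 ns expressed in microseconds
assert round(1.0 / CLOCK_US) == 160       # 1 us -> 160 clock cycles
assert round(630.0 / CLOCK_US) == 100800  # e.g. a 630 us acquisition window
assert abs(0xf600 * CLOCK_US - 393.6) < 1e-6   # default integration time in us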
ljgabc/lfs
usr/lib/python2.7/test/test_zipimport.py
128
16817
import sys import os import marshal import imp import struct import time import unittest from test import test_support from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co # some tests can be ran even without zlib try: import zlib except ImportError: zlib = None from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED import zipimport import linecache import doctest import inspect import StringIO from traceback import extract_tb, extract_stack, print_tb raise_src = 'def do_raise(): raise TypeError\n' def make_pyc(co, mtime): data = marshal.dumps(co) if type(mtime) is type(0.0): # Mac mtimes need a bit of special casing if mtime < 0x7fffffff: mtime = int(mtime) else: mtime = int(-0x100000000L + long(mtime)) pyc = imp.get_magic() + struct.pack("<i", int(mtime)) + data return pyc def module_path_to_dotted_name(path): return path.replace(os.sep, '.') NOW = time.time() test_pyc = make_pyc(test_co, NOW) if __debug__: pyc_ext = ".pyc" else: pyc_ext = ".pyo" TESTMOD = "ziptestmodule" TESTPACK = "ziptestpackage" TESTPACK2 = "ziptestpackage2" TEMP_ZIP = os.path.abspath("junk95142" + os.extsep + "zip") class UncompressedZipImportTestCase(ImportHooksBaseTestCase): compression = ZIP_STORED def setUp(self): # We're reusing the zip archive path, so we must clear the # cached directory info and linecache linecache.clearcache() zipimport._zip_directory_cache.clear() ImportHooksBaseTestCase.setUp(self) def doTest(self, expected_ext, files, *modules, **kw): z = ZipFile(TEMP_ZIP, "w") try: for name, (mtime, data) in files.items(): zinfo = ZipInfo(name, time.localtime(mtime)) zinfo.compress_type = self.compression z.writestr(zinfo, data) z.close() stuff = kw.get("stuff", None) if stuff is not None: # Prepend 'stuff' to the start of the zipfile f = open(TEMP_ZIP, "rb") data = f.read() f.close() f = open(TEMP_ZIP, "wb") f.write(stuff) f.write(data) f.close() sys.path.insert(0, TEMP_ZIP) mod = __import__(".".join(modules), globals(), locals(), ["__dummy__"]) call = kw.get('call') if call is not None: call(mod) if expected_ext: file = mod.get_file() self.assertEqual(file, os.path.join(TEMP_ZIP, *modules) + expected_ext) finally: z.close() os.remove(TEMP_ZIP) def testAFakeZlib(self): # # This could cause a stack overflow before: importing zlib.py # from a compressed archive would cause zlib to be imported # which would find zlib.py in the archive, which would... etc. # # This test *must* be executed first: it must be the first one # to trigger zipimport to import zlib (zipimport caches the # zlib.decompress function object, after which the problem being # tested here wouldn't be a problem anymore... # (Hence the 'A' in the test method name: to make it the first # item in a list sorted by name, like unittest.makeSuite() does.) # # This test fails on platforms on which the zlib module is # statically linked, but the problem it tests for can't # occur in that case (builtin modules are always found first), # so we'll simply skip it then. Bug #765456. 
# if "zlib" in sys.builtin_module_names: return if "zlib" in sys.modules: del sys.modules["zlib"] files = {"zlib.py": (NOW, test_src)} try: self.doTest(".py", files, "zlib") except ImportError: if self.compression != ZIP_DEFLATED: self.fail("expected test to not raise ImportError") else: if self.compression != ZIP_STORED: self.fail("expected test to raise ImportError") def testPy(self): files = {TESTMOD + ".py": (NOW, test_src)} self.doTest(".py", files, TESTMOD) def testPyc(self): files = {TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTMOD) def testBoth(self): files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTMOD) def testEmptyPy(self): files = {TESTMOD + ".py": (NOW, "")} self.doTest(None, files, TESTMOD) def testBadMagic(self): # make pyc magic word invalid, forcing loading from .py m0 = ord(test_pyc[0]) m0 ^= 0x04 # flip an arbitrary bit badmagic_pyc = chr(m0) + test_pyc[1:] files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, badmagic_pyc)} self.doTest(".py", files, TESTMOD) def testBadMagic2(self): # make pyc magic word invalid, causing an ImportError m0 = ord(test_pyc[0]) m0 ^= 0x04 # flip an arbitrary bit badmagic_pyc = chr(m0) + test_pyc[1:] files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)} try: self.doTest(".py", files, TESTMOD) except ImportError: pass else: self.fail("expected ImportError; import from bad pyc") def testBadMTime(self): t3 = ord(test_pyc[7]) t3 ^= 0x02 # flip the second bit -- not the first as that one # isn't stored in the .py's mtime in the zip archive. badtime_pyc = test_pyc[:7] + chr(t3) + test_pyc[8:] files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, badtime_pyc)} self.doTest(".py", files, TESTMOD) def testPackage(self): packdir = TESTPACK + os.sep files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc), packdir + TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTPACK, TESTMOD) def testDeepPackage(self): packdir = TESTPACK + os.sep packdir2 = packdir + TESTPACK2 + os.sep files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)} self.doTest(pyc_ext, files, TESTPACK, TESTPACK2, TESTMOD) def testZipImporterMethods(self): packdir = TESTPACK + os.sep packdir2 = packdir + TESTPACK2 + os.sep files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)} z = ZipFile(TEMP_ZIP, "w") try: for name, (mtime, data) in files.items(): zinfo = ZipInfo(name, time.localtime(mtime)) zinfo.compress_type = self.compression z.writestr(zinfo, data) z.close() zi = zipimport.zipimporter(TEMP_ZIP) self.assertEqual(zi.archive, TEMP_ZIP) self.assertEqual(zi.is_package(TESTPACK), True) mod = zi.load_module(TESTPACK) self.assertEqual(zi.get_filename(TESTPACK), mod.__file__) self.assertEqual(zi.is_package(packdir + '__init__'), False) self.assertEqual(zi.is_package(packdir + TESTPACK2), True) self.assertEqual(zi.is_package(packdir2 + TESTMOD), False) mod_path = packdir2 + TESTMOD mod_name = module_path_to_dotted_name(mod_path) __import__(mod_name) mod = sys.modules[mod_name] self.assertEqual(zi.get_source(TESTPACK), None) self.assertEqual(zi.get_source(mod_path), None) self.assertEqual(zi.get_filename(mod_path), mod.__file__) # To pass in the module name instead of the path, we must use the right importer loader = mod.__loader__ self.assertEqual(loader.get_source(mod_name), 
None) self.assertEqual(loader.get_filename(mod_name), mod.__file__) # test prefix and archivepath members zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK) self.assertEqual(zi2.archive, TEMP_ZIP) self.assertEqual(zi2.prefix, TESTPACK + os.sep) finally: z.close() os.remove(TEMP_ZIP) def testZipImporterMethodsInSubDirectory(self): packdir = TESTPACK + os.sep packdir2 = packdir + TESTPACK2 + os.sep files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc), packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)} z = ZipFile(TEMP_ZIP, "w") try: for name, (mtime, data) in files.items(): zinfo = ZipInfo(name, time.localtime(mtime)) zinfo.compress_type = self.compression z.writestr(zinfo, data) z.close() zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir) self.assertEqual(zi.archive, TEMP_ZIP) self.assertEqual(zi.prefix, packdir) self.assertEqual(zi.is_package(TESTPACK2), True) mod = zi.load_module(TESTPACK2) self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__) self.assertEqual(zi.is_package(TESTPACK2 + os.sep + '__init__'), False) self.assertEqual(zi.is_package(TESTPACK2 + os.sep + TESTMOD), False) mod_path = TESTPACK2 + os.sep + TESTMOD mod_name = module_path_to_dotted_name(mod_path) __import__(mod_name) mod = sys.modules[mod_name] self.assertEqual(zi.get_source(TESTPACK2), None) self.assertEqual(zi.get_source(mod_path), None) self.assertEqual(zi.get_filename(mod_path), mod.__file__) # To pass in the module name instead of the path, we must use the right importer loader = mod.__loader__ self.assertEqual(loader.get_source(mod_name), None) self.assertEqual(loader.get_filename(mod_name), mod.__file__) finally: z.close() os.remove(TEMP_ZIP) def testGetData(self): z = ZipFile(TEMP_ZIP, "w") z.compression = self.compression try: name = "testdata.dat" data = "".join([chr(x) for x in range(256)]) * 500 z.writestr(name, data) z.close() zi = zipimport.zipimporter(TEMP_ZIP) self.assertEqual(data, zi.get_data(name)) self.assertIn('zipimporter object', repr(zi)) finally: z.close() os.remove(TEMP_ZIP) def testImporterAttr(self): src = """if 1: # indent hack def get_file(): return __file__ if __loader__.get_data("some.data") != "some data": raise AssertionError, "bad data"\n""" pyc = make_pyc(compile(src, "<???>", "exec"), NOW) files = {TESTMOD + pyc_ext: (NOW, pyc), "some.data": (NOW, "some data")} self.doTest(pyc_ext, files, TESTMOD) def testImport_WithStuff(self): # try importing from a zipfile which contains additional # stuff at the beginning of the file files = {TESTMOD + ".py": (NOW, test_src)} self.doTest(".py", files, TESTMOD, stuff="Some Stuff"*31) def assertModuleSource(self, module): self.assertEqual(inspect.getsource(module), test_src) def testGetSource(self): files = {TESTMOD + ".py": (NOW, test_src)} self.doTest(".py", files, TESTMOD, call=self.assertModuleSource) def testGetCompiledSource(self): pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW) files = {TESTMOD + ".py": (NOW, test_src), TESTMOD + pyc_ext: (NOW, pyc)} self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource) def runDoctest(self, callback): files = {TESTMOD + ".py": (NOW, test_src), "xyz.txt": (NOW, ">>> log.append(True)\n")} self.doTest(".py", files, TESTMOD, call=callback) def doDoctestFile(self, module): log = [] old_master, doctest.master = doctest.master, None try: doctest.testfile( 'xyz.txt', package=module, module_relative=True, globs=locals() ) finally: doctest.master = old_master self.assertEqual(log,[True]) def testDoctestFile(self): self.runDoctest(self.doDoctestFile) def doDoctestSuite(self, 
module): log = [] doctest.DocFileTest( 'xyz.txt', package=module, module_relative=True, globs=locals() ).run() self.assertEqual(log,[True]) def testDoctestSuite(self): self.runDoctest(self.doDoctestSuite) def doTraceback(self, module): try: module.do_raise() except: tb = sys.exc_info()[2].tb_next f,lno,n,line = extract_tb(tb, 1)[0] self.assertEqual(line, raise_src.strip()) f,lno,n,line = extract_stack(tb.tb_frame, 1)[0] self.assertEqual(line, raise_src.strip()) s = StringIO.StringIO() print_tb(tb, 1, s) self.assertTrue(s.getvalue().endswith(raise_src)) else: raise AssertionError("This ought to be impossible") def testTraceback(self): files = {TESTMOD + ".py": (NOW, raise_src)} self.doTest(None, files, TESTMOD, call=self.doTraceback) @unittest.skipUnless(zlib, "requires zlib") class CompressedZipImportTestCase(UncompressedZipImportTestCase): compression = ZIP_DEFLATED class BadFileZipImportTestCase(unittest.TestCase): def assertZipFailure(self, filename): self.assertRaises(zipimport.ZipImportError, zipimport.zipimporter, filename) def testNoFile(self): self.assertZipFailure('AdfjdkFJKDFJjdklfjs') def testEmptyFilename(self): self.assertZipFailure('') def testBadArgs(self): self.assertRaises(TypeError, zipimport.zipimporter, None) self.assertRaises(TypeError, zipimport.zipimporter, TESTMOD, kwd=None) def testFilenameTooLong(self): self.assertZipFailure('A' * 33000) def testEmptyFile(self): test_support.unlink(TESTMOD) open(TESTMOD, 'w+').close() self.assertZipFailure(TESTMOD) def testFileUnreadable(self): test_support.unlink(TESTMOD) fd = os.open(TESTMOD, os.O_CREAT, 000) try: os.close(fd) self.assertZipFailure(TESTMOD) finally: # If we leave "the read-only bit" set on Windows, nothing can # delete TESTMOD, and later tests suffer bogus failures. os.chmod(TESTMOD, 0666) test_support.unlink(TESTMOD) def testNotZipFile(self): test_support.unlink(TESTMOD) fp = open(TESTMOD, 'w+') fp.write('a' * 22) fp.close() self.assertZipFailure(TESTMOD) # XXX: disabled until this works on Big-endian machines def _testBogusZipFile(self): test_support.unlink(TESTMOD) fp = open(TESTMOD, 'w+') fp.write(struct.pack('=I', 0x06054B50)) fp.write('a' * 18) fp.close() z = zipimport.zipimporter(TESTMOD) try: self.assertRaises(TypeError, z.find_module, None) self.assertRaises(TypeError, z.load_module, None) self.assertRaises(TypeError, z.is_package, None) self.assertRaises(TypeError, z.get_code, None) self.assertRaises(TypeError, z.get_data, None) self.assertRaises(TypeError, z.get_source, None) error = zipimport.ZipImportError self.assertEqual(z.find_module('abc'), None) self.assertRaises(error, z.load_module, 'abc') self.assertRaises(error, z.get_code, 'abc') self.assertRaises(IOError, z.get_data, 'abc') self.assertRaises(error, z.get_source, 'abc') self.assertRaises(error, z.is_package, 'abc') finally: zipimport._zip_directory_cache.clear() def test_main(): try: test_support.run_unittest( UncompressedZipImportTestCase, CompressedZipImportTestCase, BadFileZipImportTestCase, ) finally: test_support.unlink(TESTMOD) if __name__ == "__main__": test_main()
gpl-2.0
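A minimal standalone sketch of the mechanism these tests exercise (Python 3 syntax here, although the test module above is Python 2): write a module into a zip archive, put the archive on sys.path, and import it through zipimport.

import os, sys, zipfile

archive = "demo_zipimport.zip"
with zipfile.ZipFile(archive, "w") as z:
    z.writestr("ziptestmodule.py", "VALUE = 42\n")

sys.path.insert(0, archive)
try:
    import ziptestmodule                 # resolved by the built-in zipimporter
    assert ziptestmodule.VALUE == 42
    assert archive in ziptestmodule.__file__
finally:
    sys.path.remove(archive)
    os.remove(archive)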
dylanparsons/Sample-Code
python/pickle.py
1
3287
import pickle import math import random def arrow(n): return str(n)+"--> " def isPositive(aNumber): return aNumber > 0 def abs(aNumber): if aNumber >= 0: return aNumber return -aNumber def main2(): """ test two build in high order functions: map and filter functions""" oldList = [-10, -20, 0, 30,40] print("oldList: ", oldList) newList = [] # a string version of oldList for number in oldList: newList.append(str(number)) print("newList: ", newList) newList2 = list( map(str, oldList) ) print("newList2: ", newList2) newList3 = list( map(arrow, oldList) ) print("newList3: ", newList3) newList4 = list( filter(isPositive, oldList) ) print("newList4: ", newList4) newList5 = list( map(abs, oldList) ) print("newList5: ", newList5) newList6 = list( filter(abs, oldList) ) print("newList6: ", newList6) print() def main3(): """ test pickling""" lyst = ["COMP", 164, "-03", "pi=", 3.14] fileObj = open("items.dat", "wb") for item in lyst: pickle.dump(item, fileObj) fileObj.close() lyst2 = list() fileObj = open("items.dat", "rb") while True: try: item = pickle.load(fileObj) lyst2.append(item) except EOFError: break fileObj.close() print(lyst2) fileObj = open("items2.dat", "wb") pickle.dump(lyst, fileObj) fileObj.close() print() def main4(): """ test pickling""" lyst = ["COMP", 164, "-03", "pi=", 3.14] fileObj = open("items.dat", "wb") pickle.dump(lyst, fileObj) fileObj.close() fileObj = open("items.dat", "rb") lyst2 = pickle.load(fileObj) fileObj.close() print(lyst2) print() def main5(): """ test two build in high order functions: map and filter functions""" oldList = [] for i in range(10): oldList.append(random.randint(-500, 500)) print("oldList: ", oldList) newList = [] newList = list( map(polynomial, oldList) ) print("NewList: ", newList) newNewList = list(filter(isBetween100, oldList)) print("Number of Numbers between -100 and 100: ", len(newNewList)) newJar = open("name.dat", "wb") pickle.dump(newList, newJar) newJar.close() NewestList=[] oldJar=open("name.dat", "rb") NewestList = pickle.load(oldJar) oldJar.close() print("Here is the unpickled list: ") print(NewestList) print() print("newList: ", newList) newList2 = list( map(str, oldList) ) print("newList2: ", newList2) newList3 = list( map(arrow, oldList) ) print("newList3: ", newList3) newList4 = list( filter(isPositive, oldList) ) print("newList4: ", newList4) newList5 = list( map(abs, oldList) ) print("newList5: ", newList5) newList6 = list( filter(abs, oldList) ) print("newList6: ", newList6) print() def polynomial(x): return ((3*x*x)-2*x-1) def isBetween100(x): if x>=-100 and x<=100: return True return False if __name__ == "__main__": main2() main3() main4() main5()
gpl-3.0
MiLk/youtube-dl
youtube_dl/extractor/ign.py
12
4549
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class IGNIE(InfoExtractor):
    """
    Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
    Some videos of it.ign.com are also supported
    """

    _VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)'
    IE_NAME = 'ign.com'

    _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config'
    _DESCRIPTION_RE = [
        r'<span class="page-object-description">(.+?)</span>',
        r'id="my_show_video">.*?<p>(.*?)</p>',
    ]

    _TESTS = [
        {
            'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
            'md5': 'eac8bdc1890980122c3b66f14bdd02e9',
            'info_dict': {
                'id': '8f862beef863986b2785559b9e1aa599',
                'ext': 'mp4',
                'title': 'The Last of Us Review',
                'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
            }
        },
        {
            'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
            'playlist': [
                {
                    'info_dict': {
                        'id': '5ebbd138523268b93c9141af17bec937',
                        'ext': 'mp4',
                        'title': 'GTA 5 Video Review',
                        'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
                    },
                },
                {
                    'info_dict': {
                        'id': '638672ee848ae4ff108df2a296418ee2',
                        'ext': 'mp4',
                        'title': '26 Twisted Moments from GTA 5 in Slow Motion',
                        'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
                    },
                },
            ],
            'params': {
                'skip_download': True,
            },
        },
    ]

    def _find_video_id(self, webpage):
        res_id = [
            r'data-video-id="(.+?)"',
            r'<object id="vid_(.+?)"',
            r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
        ]
        return self._search_regex(res_id, webpage, 'video id')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name_or_id = mobj.group('name_or_id')
        page_type = mobj.group('type')
        webpage = self._download_webpage(url, name_or_id)
        if page_type == 'articles':
            video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, 'video url')
            return self.url_result(video_url, ie='IGN')
        elif page_type != 'video':
            multiple_urls = re.findall(
                '<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
                webpage)
            if multiple_urls:
                return [self.url_result(u, ie='IGN') for u in multiple_urls]

        video_id = self._find_video_id(webpage)
        result = self._get_video_info(video_id)
        description = self._html_search_regex(self._DESCRIPTION_RE,
                                              webpage, 'video description',
                                              flags=re.DOTALL)
        result['description'] = description
        return result

    def _get_video_info(self, video_id):
        config_url = self._CONFIG_URL_TEMPLATE % video_id
        config = self._download_json(config_url, video_id)
        media = config['playlist']['media']

        return {
            'id': media['metadata']['videoId'],
            'url': media['url'],
            'title': media['metadata']['title'],
            'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
        }


class OneUPIE(IGNIE):
    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
    IE_NAME = '1up.com'

    _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'

    _TESTS = [{
        'url': 'http://gamevideos.1up.com/video/id/34976',
        'md5': '68a54ce4ebc772e4b71e3123d413163d',
        'info_dict': {
            'id': '34976',
            'ext': 'mp4',
            'title': 'Sniper Elite V2 - Trailer',
            'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        result = super(OneUPIE, self)._real_extract(url)
        result['id'] = mobj.group('name_or_id')
        return result
unlicense
abenzbiria/clients_odoo
addons/website_hr_recruitment/controllers/main.py
19
5703
# -*- coding: utf-8 -*-

import base64

from openerp import SUPERUSER_ID
from openerp import http
from openerp.tools.translate import _
from openerp.http import request
from openerp.addons.website.models.website import slug


class website_hr_recruitment(http.Controller):
    @http.route([
        '/jobs',
        '/jobs/country/<model("res.country"):country>',
        '/jobs/department/<model("hr.department"):department>',
        '/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>',
        '/jobs/office/<int:office_id>',
        '/jobs/country/<model("res.country"):country>/office/<int:office_id>',
        '/jobs/department/<model("hr.department"):department>/office/<int:office_id>',
        '/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>/office/<int:office_id>',
    ], type='http', auth="public", website=True)
    def jobs(self, country=None, department=None, office_id=None):
        env = request.env(context=dict(request.env.context, show_address=True, no_tag_br=True))

        Country = env['res.country']
        Jobs = env['hr.job']

        # List jobs available to current UID
        job_ids = Jobs.search([], order="website_published desc,no_of_recruitment desc").ids
        # Browse jobs as superuser, because address is restricted
        jobs = Jobs.sudo().browse(job_ids)

        # Deduce departments and offices of those jobs
        departments = set(j.department_id for j in jobs if j.department_id)
        offices = set(j.address_id for j in jobs if j.address_id)
        countries = set(o.country_id for o in offices if o.country_id)

        # Default search by user country
        if not (country or department or office_id):
            country_code = request.session['geoip'].get('country_code')
            if country_code:
                countries_ = Country.search([('code', '=', country_code)])
                country = countries_[0] if countries_ else None

        # Filter the matching one
        if country:
            jobs = (j for j in jobs if j.address_id is None or j.address_id.country_id and j.address_id.country_id.id == country.id)
        if department:
            jobs = (j for j in jobs if j.department_id and j.department_id.id == department.id)
        if office_id:
            jobs = (j for j in jobs if j.address_id and j.address_id.id == office_id)

        # Render page
        return request.website.render("website_hr_recruitment.index", {
            'jobs': jobs,
            'countries': countries,
            'departments': departments,
            'offices': offices,
            'country_id': country,
            'department_id': department,
            'office_id': office_id,
        })

    @http.route('/jobs/add', type='http', auth="user", website=True)
    def jobs_add(self, **kwargs):
        job = request.env['hr.job'].create({
            'name': _('New Job Offer'),
        })
        return request.redirect("/jobs/detail/%s?enable_editor=1" % slug(job))

    @http.route('/jobs/detail/<model("hr.job"):job>', type='http', auth="public", website=True)
    def jobs_detail(self, job, **kwargs):
        return request.render("website_hr_recruitment.detail", {
            'job': job,
            'main_object': job,
        })

    @http.route('/jobs/apply/<model("hr.job"):job>', type='http', auth="public", website=True)
    def jobs_apply(self, job):
        error = {}
        default = {}
        if 'website_hr_recruitment_error' in request.session:
            error = request.session.pop('website_hr_recruitment_error')
            default = request.session.pop('website_hr_recruitment_default')
        return request.render("website_hr_recruitment.apply", {
            'job': job,
            'error': error,
            'default': default,
        })

    @http.route('/jobs/thankyou', methods=['POST'], type='http', auth="public", website=True)
    def jobs_thankyou(self, **post):
        error = {}
        for field_name in ["partner_name", "phone", "email_from"]:
            if not post.get(field_name):
                error[field_name] = 'missing'
        if error:
            request.session['website_hr_recruitment_error'] = error
            ufile = post.pop('ufile')
            if ufile:
                error['ufile'] = 'reset'
            request.session['website_hr_recruitment_default'] = post
            return request.redirect('/jobs/apply/%s' % post.get("job_id"))

        # public user can't create applicants (duh)
        env = request.env(user=SUPERUSER_ID)
        value = {
            'source_id': env.ref('hr_recruitment.source_website_company').id,
            'name': '%s\'s Application' % post.get('partner_name'),
        }
        for f in ['email_from', 'partner_name', 'description']:
            value[f] = post.get(f)
        for f in ['department_id', 'job_id']:
            value[f] = int(post.get(f) or 0)
        # Retro-compatibility for saas-3. "phone" should be replaced by "partner_phone" in the template in trunk.
        value['partner_phone'] = post.pop('phone', False)

        applicant_id = env['hr.applicant'].create(value).id
        if post['ufile']:
            attachment_value = {
                'name': post['ufile'].filename,
                'res_name': value['partner_name'],
                'res_model': 'hr.applicant',
                'res_id': applicant_id,
                'datas': base64.encodestring(post['ufile'].read()),
                'datas_fname': post['ufile'].filename,
            }
            env['ir.attachment'].create(attachment_value)
        return request.render("website_hr_recruitment.thankyou", {})

# vim :et:
agpl-3.0
sargas/scipy
scipy/stats/tests/test_mstats_extras.py
4
4790
# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for maskedArray statistics. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu """ from __future__ import division, print_function, absolute_import __author__ = "Pierre GF Gerard-Marchant ($Author: backtopop $)" import numpy as np import numpy.ma as ma import scipy.stats.mstats as ms #import scipy.stats.mmorestats as mms from numpy.testing import TestCase, run_module_suite, assert_equal, \ assert_almost_equal, assert_ class TestMisc(TestCase): # def __init__(self, *args, **kwargs): TestCase.__init__(self, *args, **kwargs) # def test_mjci(self): "Tests the Marits-Jarrett estimator" data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262, 296,299,306,376,428,515,666,1310,2611]) assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5) # def test_trimmedmeanci(self): "Tests the confidence intervals of the trimmed mean." data = ma.array([545,555,558,572,575,576,578,580, 594,605,635,651,653,661,666]) assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1) assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1), [561.8, 630.6]) # def test_idealfourths(self): "Tests ideal-fourths" test = np.arange(100) assert_almost_equal(np.asarray(ms.idealfourths(test)), [24.416667,74.583333],6) test_2D = test.repeat(3).reshape(-1,3) assert_almost_equal(ms.idealfourths(test_2D, axis=0), [[24.416667,24.416667,24.416667], [74.583333,74.583333,74.583333]],6) assert_almost_equal(ms.idealfourths(test_2D, axis=1), test.repeat(2).reshape(-1,2)) test = [0,0] _result = ms.idealfourths(test) assert_(np.isnan(_result).all()) #.............................................................................. class TestQuantiles(TestCase): # def __init__(self, *args, **kwargs): TestCase.__init__(self, *args, **kwargs) # def test_hdquantiles(self): data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014, 0.887764025,0.239407086,0.349638551,0.972791145,0.149789972, 0.936947700,0.132359948,0.046041972,0.641675031,0.945530547, 0.224218684,0.771450991,0.820257774,0.336458052,0.589113496, 0.509736129,0.696838829,0.491323573,0.622767425,0.775189248, 0.641461450,0.118455200,0.773029450,0.319280007,0.752229111, 0.047841438,0.466295911,0.583850781,0.840581845,0.550086491, 0.466470062,0.504765074,0.226855960,0.362641207,0.891620942, 0.127898691,0.490094097,0.044882048,0.041441695,0.317976349, 0.504135618,0.567353033,0.434617473,0.636243375,0.231803616, 0.230154113,0.160011327,0.819464108,0.854706985,0.438809221, 0.487427267,0.786907310,0.408367937,0.405534192,0.250444460, 0.995309248,0.144389588,0.739947527,0.953543606,0.680051621, 0.388382017,0.863530727,0.006514031,0.118007779,0.924024803, 0.384236354,0.893687694,0.626534881,0.473051932,0.750134705, 0.241843555,0.432947602,0.689538104,0.136934797,0.150206859, 0.474335206,0.907775349,0.525869295,0.189184225,0.854284286, 0.831089744,0.251637345,0.587038213,0.254475554,0.237781276, 0.827928620,0.480283781,0.594514455,0.213641488,0.024194386, 0.536668589,0.699497811,0.892804071,0.093835427,0.731107772] # assert_almost_equal(ms.hdquantiles(data,[0., 1.]), [0.006514031, 0.995309248]) hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75]) assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,]) hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75]) assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4) # data = np.array(data).reshape(10,10) hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0) assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75])) 
assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75])) hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True) assert_almost_equal(hdq[...,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True)) assert_almost_equal(hdq[...,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True)) ############################################################################### if __name__ == "__main__": run_module_suite()
bsd-3-clause
cohortfsllc/cohort-cocl2-sandbox
buildbot/buildbot_lib.py
2
21805
#!/usr/bin/python # Copyright (c) 2012 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import optparse import os.path import shutil import subprocess import stat import sys import time import traceback ARCH_MAP = { '32': { 'gyp_arch': 'ia32', 'scons_platform': 'x86-32', }, '64': { 'gyp_arch': 'x64', 'scons_platform': 'x86-64', }, 'arm': { 'gyp_arch': 'arm', 'scons_platform': 'arm', }, 'mips32': { 'gyp_arch': 'mips32', 'scons_platform': 'mips32', }, } def RunningOnBuildbot(): return os.environ.get('BUILDBOT_SLAVE_TYPE') is not None def GetHostPlatform(): sys_platform = sys.platform.lower() if sys_platform.startswith('linux'): return 'linux' elif sys_platform in ('win', 'win32', 'windows', 'cygwin'): return 'win' elif sys_platform in ('darwin', 'mac'): return 'mac' else: raise Exception('Can not determine the platform!') def SetDefaultContextAttributes(context): """ Set default values for the attributes needed by the SCons function, so that SCons can be run without needing ParseStandardCommandLine """ platform = GetHostPlatform() context['platform'] = platform context['mode'] = 'opt' context['default_scons_mode'] = ['opt-host', 'nacl'] context['default_scons_platform'] = ('x86-64' if platform == 'win' else 'x86-32') context['android'] = False context['clang'] = False context['asan'] = False context['pnacl'] = False context['use_glibc'] = False context['use_breakpad_tools'] = False context['max_jobs'] = 8 context['scons_args'] = [] # Windows-specific environment manipulation def SetupWindowsEnvironment(context): # Poke around looking for MSVC. We should do something more principled in # the future. # The name of Program Files can differ, depending on the bittage of Windows. program_files = r'c:\Program Files (x86)' if not os.path.exists(program_files): program_files = r'c:\Program Files' if not os.path.exists(program_files): raise Exception('Cannot find the Program Files directory!') # The location of MSVC can differ depending on the version. msvc_locs = [ ('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'), ('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'), ('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'), ('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'), ] for dirname, comntools_var, gyp_msvs_version in msvc_locs: msvc = os.path.join(program_files, dirname) context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version) if os.path.exists(msvc): break else: # The break statement did not execute. raise Exception('Cannot find MSVC!') # Put MSVC in the path. vc = os.path.join(msvc, 'VC') comntools = os.path.join(msvc, 'Common7', 'Tools') perf = os.path.join(msvc, 'Team Tools', 'Performance Tools') context.SetEnv('PATH', os.pathsep.join([ context.GetEnv('PATH'), vc, comntools, perf])) # SCons needs this variable to find vsvars.bat. # The end slash is needed because the batch files expect it. context.SetEnv(comntools_var, comntools + '\\') # This environment variable will SCons to print debug info while it searches # for MSVC. context.SetEnv('SCONS_MSCOMMON_DEBUG', '-') # Needed for finding devenv. 
context['msvc'] = msvc SetupGyp(context, []) def SetupGyp(context, extra_vars=[]): context.SetEnv('GYP_GENERATORS', 'ninja') if RunningOnBuildbot(): goma_opts = [ 'use_goma=1', 'gomadir=/b/build/goma', ] else: goma_opts = [] context.SetEnv('GYP_DEFINES', ' '.join( context['gyp_vars'] + goma_opts + extra_vars)) def SetupLinuxEnvironment(context): SetupGyp(context, ['target_arch='+context['gyp_arch']]) def SetupMacEnvironment(context): SetupGyp(context, ['target_arch='+context['gyp_arch']]) def SetupAndroidEnvironment(context): SetupGyp(context, ['OS=android', 'target_arch='+context['gyp_arch']]) context.SetEnv('GYP_GENERATORS', 'ninja') context.SetEnv('GYP_CROSSCOMPILE', '1') def ParseStandardCommandLine(context): """ The standard buildbot scripts require 3 arguments to run. The first argument (dbg/opt) controls if the build is a debug or a release build. The second argument (32/64) controls the machine architecture being targeted. The third argument (newlib/glibc) controls which c library we're using for the nexes. Different buildbots may have different sets of arguments. """ parser = optparse.OptionParser() parser.add_option('-n', '--dry-run', dest='dry_run', default=False, action='store_true', help='Do not execute any commands.') parser.add_option('--inside-toolchain', dest='inside_toolchain', default=bool(os.environ.get('INSIDE_TOOLCHAIN')), action='store_true', help='Inside toolchain build.') parser.add_option('--android', dest='android', default=False, action='store_true', help='Build for Android.') parser.add_option('--clang', dest='clang', default=False, action='store_true', help='Build trusted code with Clang.') parser.add_option('--coverage', dest='coverage', default=False, action='store_true', help='Build and test for code coverage.') parser.add_option('--validator', dest='validator', default=False, action='store_true', help='Only run validator regression test') parser.add_option('--asan', dest='asan', default=False, action='store_true', help='Build trusted code with ASan.') parser.add_option('--scons-args', dest='scons_args', default =[], action='append', help='Extra scons arguments.') parser.add_option('--step-suffix', metavar='SUFFIX', default='', help='Append SUFFIX to buildbot step names.') parser.add_option('--no-gyp', dest='no_gyp', default=False, action='store_true', help='Do not run the gyp build') parser.add_option('--no-goma', dest='no_goma', default=False, action='store_true', help='Do not run with goma') parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools', default=False, action='store_true', help='Use breakpad tools for testing') parser.add_option('--skip-build', dest='skip_build', default=False, action='store_true', help='Skip building steps in buildbot_pnacl') parser.add_option('--skip-run', dest='skip_run', default=False, action='store_true', help='Skip test-running steps in buildbot_pnacl') options, args = parser.parse_args() if len(args) != 3: parser.error('Expected 3 arguments: mode arch toolchain') # script + 3 args == 4 mode, arch, toolchain = args if mode not in ('dbg', 'opt', 'coverage'): parser.error('Invalid mode %r' % mode) if arch not in ARCH_MAP: parser.error('Invalid arch %r' % arch) if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'): parser.error('Invalid toolchain %r' % toolchain) # TODO(ncbray) allow a command-line override platform = GetHostPlatform() context['platform'] = platform context['mode'] = mode context['arch'] = arch context['android'] = options.android # ASan is Clang, so set the flag to simplify other 
checks. context['clang'] = options.clang or options.asan context['validator'] = options.validator context['asan'] = options.asan # TODO(ncbray) turn derived values into methods. context['gyp_mode'] = { 'opt': 'Release', 'dbg': 'Debug', 'coverage': 'Debug'}[mode] context['gn_is_debug'] = { 'opt': 'false', 'dbg': 'true', 'coverage': 'true'}[mode] context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch'] context['gyp_vars'] = [] if context['clang']: context['gyp_vars'].append('clang=1') if context['asan']: context['gyp_vars'].append('asan=1') context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform'] context['default_scons_mode'] = ['nacl'] # Only Linux can build trusted code on ARM. # TODO(mcgrathr): clean this up somehow if arch != 'arm' or platform == 'linux': context['default_scons_mode'] += [mode + '-host'] context['use_glibc'] = toolchain == 'glibc' context['pnacl'] = toolchain == 'pnacl' context['nacl_clang'] = toolchain == 'nacl_clang' context['max_jobs'] = 8 context['dry_run'] = options.dry_run context['inside_toolchain'] = options.inside_toolchain context['step_suffix'] = options.step_suffix context['no_gyp'] = options.no_gyp context['no_goma'] = options.no_goma context['coverage'] = options.coverage context['use_breakpad_tools'] = options.use_breakpad_tools context['scons_args'] = options.scons_args context['skip_build'] = options.skip_build context['skip_run'] = options.skip_run # Don't run gyp on coverage builds. if context['coverage']: context['no_gyp'] = True for key, value in sorted(context.config.items()): print '%s=%s' % (key, value) def EnsureDirectoryExists(path): """ Create a directory if it does not already exist. Does not mask failures, but there really shouldn't be any. """ if not os.path.exists(path): os.makedirs(path) def TryToCleanContents(path, file_name_filter=lambda fn: True): """ Remove the contents of a directory without touching the directory itself. Ignores all failures. """ if os.path.exists(path): for fn in os.listdir(path): TryToCleanPath(os.path.join(path, fn), file_name_filter) def TryToCleanPath(path, file_name_filter=lambda fn: True): """ Removes a file or directory. Ignores all failures. """ if os.path.exists(path): if file_name_filter(path): print 'Trying to remove %s' % path try: RemovePath(path) except Exception: print 'Failed to remove %s' % path else: print 'Skipping %s' % path def Retry(op, *args): # Windows seems to be prone to having commands that delete files or # directories fail. We currently do not have a complete understanding why, # and as a workaround we simply retry the command a few times. # It appears that file locks are hanging around longer than they should. This # may be a secondary effect of processes hanging around longer than they # should. This may be because when we kill a browser sel_ldr does not exit # immediately, etc. # Virus checkers can also accidently prevent files from being deleted, but # that shouldn't be a problem on the bots. if GetHostPlatform() == 'win': count = 0 while True: try: op(*args) break except Exception: print "FAILED: %s %s" % (op.__name__, repr(args)) count += 1 if count < 5: print "RETRY: %s %s" % (op.__name__, repr(args)) time.sleep(pow(2, count)) else: # Don't mask the exception. raise else: op(*args) def PermissionsFixOnError(func, path, exc_info): if not os.access(path, os.W_OK): os.chmod(path, stat.S_IWUSR) func(path) else: raise def _RemoveDirectory(path): print 'Removing %s' % path if os.path.exists(path): shutil.rmtree(path, onerror=PermissionsFixOnError) print ' Succeeded.' 
else: print ' Path does not exist, nothing to do.' def RemoveDirectory(path): """ Remove a directory if it exists. Does not mask failures, although it does retry a few times on Windows. """ Retry(_RemoveDirectory, path) def RemovePath(path): """Remove a path, file or directory.""" if os.path.isdir(path): RemoveDirectory(path) else: if os.path.isfile(path) and not os.access(path, os.W_OK): os.chmod(path, stat.S_IWUSR) os.remove(path) # This is a sanity check so Command can print out better error information. def FileCanBeFound(name, paths): # CWD if os.path.exists(name): return True # Paths with directories are not resolved using the PATH variable. if os.path.dirname(name): return False # In path for path in paths.split(os.pathsep): full = os.path.join(path, name) if os.path.exists(full): return True return False def RemoveGypBuildDirectories(): # Remove all directories on all platforms. Overkill, but it allows for # straight-line code. # Windows RemoveDirectory('build/Debug') RemoveDirectory('build/Release') RemoveDirectory('build/Debug-Win32') RemoveDirectory('build/Release-Win32') RemoveDirectory('build/Debug-x64') RemoveDirectory('build/Release-x64') # Linux and Mac RemoveDirectory('../xcodebuild') RemoveDirectory('../out') RemoveDirectory('src/third_party/nacl_sdk/arm-newlib') def RemoveSconsBuildDirectories(): RemoveDirectory('scons-out') RemoveDirectory('breakpad-out') # Execute a command using Python's subprocess module. def Command(context, cmd, cwd=None): print 'Running command: %s' % ' '.join(cmd) # Python's subprocess has a quirk. A subprocess can execute with an # arbitrary, user-defined environment. The first argument of the command, # however, is located using the PATH variable of the Python script that is # launching the subprocess. Modifying the PATH in the environment passed to # the subprocess does not affect Python's search for the first argument of # the command (the executable file.) This is a little counter intuitive, # so we're forcing the search to use the same PATH variable as is seen by # the subprocess. env = context.MakeCommandEnv() script_path = os.environ['PATH'] os.environ['PATH'] = env['PATH'] try: if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']: # Make sure that print statements before the subprocess call have been # flushed, otherwise the output of the subprocess call may appear before # the print statements. sys.stdout.flush() if context['dry_run']: retcode = 0 else: retcode = subprocess.call(cmd, cwd=cwd, env=env) else: # Provide a nicer failure message. # If subprocess cannot find the executable, it will throw a cryptic # exception. print 'Executable %r cannot be found.' % cmd[0] retcode = 1 finally: os.environ['PATH'] = script_path print 'Command return code: %d' % retcode if retcode != 0: raise StepFailed() return retcode # A specialized version of CommandStep. def SCons(context, mode=None, platform=None, parallel=False, browser_test=False, args=(), cwd=None): python = sys.executable if mode is None: mode = context['default_scons_mode'] if platform is None: platform = context['default_scons_platform'] if parallel: jobs = context['max_jobs'] else: jobs = 1 cmd = [] if browser_test and context.Linux(): # Although we could use the "browser_headless=1" Scons option, it runs # xvfb-run once per Chromium invocation. This is good for isolating # the tests, but xvfb-run has a stupid fixed-period sleep, which would # slow down the tests unnecessarily. 
cmd.extend(['xvfb-run', '--auto-servernum']) cmd.extend([ python, 'scons.py', '--verbose', '-k', '-j%d' % jobs, '--mode='+','.join(mode), 'platform='+platform, ]) cmd.extend(context['scons_args']) if context['clang']: cmd.append('--clang') if context['asan']: cmd.append('--asan') if context['use_glibc']: cmd.append('--nacl_glibc') if context['pnacl']: cmd.append('bitcode=1') if context['nacl_clang']: cmd.append('nacl_clang=1') if context['use_breakpad_tools']: cmd.append('breakpad_tools_dir=breakpad-out') if context['android']: cmd.append('android=1') # Append used-specified arguments. cmd.extend(args) Command(context, cmd, cwd) class StepFailed(Exception): """ Thrown when the step has failed. """ class StopBuild(Exception): """ Thrown when the entire build should stop. This does not indicate a failure, in of itself. """ class Step(object): """ This class is used in conjunction with a Python "with" statement to ensure that the preamble and postamble of each build step gets printed and failures get logged. This class also ensures that exceptions thrown inside a "with" statement don't take down the entire build. """ def __init__(self, name, status, halt_on_fail=True): self.status = status if 'step_suffix' in status.context: suffix = status.context['step_suffix'] else: suffix = '' self.name = name + suffix self.halt_on_fail = halt_on_fail self.step_failed = False # Called on entry to a 'with' block. def __enter__(self): sys.stdout.flush() print print '@@@BUILD_STEP %s@@@' % self.name self.status.ReportBegin(self.name) # The method is called on exit from a 'with' block - even for non-local # control flow, i.e. exceptions, breaks, continues, returns, etc. # If an exception is thrown inside a block wrapped with a 'with' statement, # the __exit__ handler can suppress the exception by returning True. This is # used to isolate each step in the build - if an exception occurs in a given # step, the step is treated as a failure. This allows the postamble for each # step to be printed and also allows the build to continue of the failure of # a given step doesn't halt the build. def __exit__(self, type, exception, trace): sys.stdout.flush() if exception is None: # If exception is None, no exception occurred. step_failed = False elif isinstance(exception, StepFailed): step_failed = True print print 'Halting build step because of failure.' print else: step_failed = True print print 'The build step threw an exception...' print traceback.print_exception(type, exception, trace, file=sys.stdout) print if step_failed: self.status.ReportFail(self.name) print '@@@STEP_FAILURE@@@' if self.halt_on_fail: print print 'Entire build halted because %s failed.' % self.name sys.stdout.flush() raise StopBuild() else: self.status.ReportPass(self.name) sys.stdout.flush() # Suppress any exception that occurred. return True # Adds an arbitrary link inside the build stage on the waterfall. def StepLink(text, link): print '@@@STEP_LINK@%s@%s@@@' % (text, link) # Adds arbitrary text inside the build stage on the waterfall. def StepText(text): print '@@@STEP_TEXT@%s@@@' % (text) class BuildStatus(object): """ Keeps track of the overall status of the build. """ def __init__(self, context): self.context = context self.ever_failed = False self.steps = [] def ReportBegin(self, name): pass def ReportPass(self, name): self.steps.append((name, 'passed')) def ReportFail(self, name): self.steps.append((name, 'failed')) self.ever_failed = True # Handy info when this script is run outside of the buildbot. 
def DisplayBuildStatus(self): print for step, status in self.steps: print '%-40s[%s]' % (step, status) print if self.ever_failed: print 'Build failed.' else: print 'Build succeeded.' def ReturnValue(self): return int(self.ever_failed) class BuildContext(object): """ Encapsulates the information needed for running a build command. This includes environment variables and default arguments for SCons invocations. """ # Only allow these attributes on objects of this type. __slots__ = ['status', 'global_env', 'config'] def __init__(self): # The contents of global_env override os.environ for any commands run via # self.Command(...) self.global_env = {} # PATH is a special case. See: Command. self.global_env['PATH'] = os.environ.get('PATH', '') self.config = {} self['dry_run'] = False # Emulate dictionary subscripting. def __getitem__(self, key): return self.config[key] # Emulate dictionary subscripting. def __setitem__(self, key, value): self.config[key] = value # Emulate dictionary membership test def __contains__(self, key): return key in self.config def Windows(self): return self.config['platform'] == 'win' def Linux(self): return self.config['platform'] == 'linux' def Mac(self): return self.config['platform'] == 'mac' def GetEnv(self, name, default=None): return self.global_env.get(name, default) def SetEnv(self, name, value): self.global_env[name] = str(value) def MakeCommandEnv(self): # The external environment is not sanitized. e = dict(os.environ) # Arbitrary variables can be overridden. e.update(self.global_env) return e def RunBuild(script, status): try: script(status, status.context) except StopBuild: pass # Emit a summary step for three reasons: # - The annotator will attribute non-zero exit status to the last build step. # This can misattribute failures to the last build step. # - runtest.py wraps the builds to scrape perf data. It emits an annotator # tag on exit which misattributes perf results to the last build step. # - Provide a label step in which to show summary result. # Otherwise these go back to the preamble. with Step('summary', status): if status.ever_failed: print 'There were failed stages.' else: print 'Success.' # Display a summary of the build. status.DisplayBuildStatus() sys.exit(status.ReturnValue())
bsd-3-clause
supriyantomaftuh/syzygy
third_party/numpy/files/numpy/polynomial/legendre.py
16
33731
""" Objects for dealing with Legendre series. This module provides a number of objects (mostly functions) useful for dealing with Legendre series, including a `Legendre` class that encapsulates the usual arithmetic operations. (General information on how this module represents and works with such polynomials is in the docstring for its "parent" sub-package, `numpy.polynomial`). Constants --------- - `legdomain` -- Legendre series default domain, [-1,1]. - `legzero` -- Legendre series that evaluates identically to 0. - `legone` -- Legendre series that evaluates identically to 1. - `legx` -- Legendre series for the identity map, ``f(x) = x``. Arithmetic ---------- - `legmulx` -- multiply a Legendre series in ``P_i(x)`` by ``x``. - `legadd` -- add two Legendre series. - `legsub` -- subtract one Legendre series from another. - `legmul` -- multiply two Legendre series. - `legdiv` -- divide one Legendre series by another. - `legpow` -- raise a Legendre series to an positive integer power - `legval` -- evaluate a Legendre series at given points. Calculus -------- - `legder` -- differentiate a Legendre series. - `legint` -- integrate a Legendre series. Misc Functions -------------- - `legfromroots` -- create a Legendre series with specified roots. - `legroots` -- find the roots of a Legendre series. - `legvander` -- Vandermonde-like matrix for Legendre polynomials. - `legfit` -- least-squares fit returning a Legendre series. - `legtrim` -- trim leading coefficients from a Legendre series. - `legline` -- Legendre series representing given straight line. - `leg2poly` -- convert a Legendre series to a polynomial. - `poly2leg` -- convert a polynomial to a Legendre series. Classes ------- - `Legendre` -- A Legendre series class. See also -------- `numpy.polynomial` """ from __future__ import division __all__ = ['legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', 'legfit', 'legtrim', 'legroots', 'Legendre'] import numpy as np import numpy.linalg as la import polyutils as pu import warnings from polytemplate import polytemplate legtrim = pu.trimcoef def poly2leg(pol) : """ Convert a polynomial to a Legendre series. Convert an array representing the coefficients of a polynomial (relative to the "standard" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Legendre series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-d array containing the polynomial coefficients Returns ------- cs : ndarray 1-d array containing the coefficients of the equivalent Legendre series. See Also -------- leg2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p Polynomial([ 0., 1., 2., 3.], [-1., 1.]) >>> c = P.Legendre(P.poly2leg(p.coef)) >>> c Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.]) """ [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 for i in range(deg, -1, -1) : res = legadd(legmulx(res), pol[i]) return res def leg2poly(cs) : """ Convert a Legendre series to a polynomial. Convert an array representing the coefficients of a Legendre series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest to highest degree. 
Parameters ---------- cs : array_like 1-d array containing the Legendre series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-d array containing the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest order term to highest. See Also -------- poly2leg Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> c = P.Legendre(range(4)) >>> c Legendre([ 0., 1., 2., 3.], [-1., 1.]) >>> p = c.convert(kind=P.Polynomial) >>> p Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.]) >>> P.leg2poly(range(4)) array([-1. , -3.5, 3. , 7.5]) """ from polynomial import polyadd, polysub, polymulx [cs] = pu.as_series([cs]) n = len(cs) if n < 3: return cs else: c0 = cs[-2] c1 = cs[-1] # i is the current degree of c1 for i in range(n - 1, 1, -1) : tmp = c0 c0 = polysub(cs[i - 2], (c1*(i - 1))/i) c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) return polyadd(c0, polymulx(c1)) # # These are constant arrays are of integer type so as to be compatible # with the widest range of other types, such as Decimal. # # Legendre legdomain = np.array([-1,1]) # Legendre coefficients representing zero. legzero = np.array([0]) # Legendre coefficients representing one. legone = np.array([1]) # Legendre coefficients representing the identity x. legx = np.array([0,1]) def legline(off, scl) : """ Legendre series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``off + scl*x``. Returns ------- y : ndarray This module's representation of the Legendre series for ``off + scl*x``. See Also -------- polyline, chebline Examples -------- >>> import numpy.polynomial.legendre as L >>> L.legline(3,2) array([3, 2]) >>> L.legval(-3, L.legline(3,2)) # should be -3 -3.0 """ if scl != 0 : return np.array([off,scl]) else : return np.array([off]) def legfromroots(roots) : """ Generate a Legendre series with the given roots. Return the array of coefficients for the P-series whose roots (a.k.a. "zeros") are given by *roots*. The returned array of coefficients is ordered from lowest order "term" to highest, and zeros of multiplicity greater than one must be included in *roots* a number of times equal to their multiplicity (e.g., if `2` is a root of multiplicity three, then [2,2,2] must be in *roots*). Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-d array of the Legendre series coefficients, ordered from low to high. If all roots are real, ``out.dtype`` is a float type; otherwise, ``out.dtype`` is a complex type, even if all the coefficients in the result are real (see Examples below). See Also -------- polyfromroots, chebfromroots Notes ----- What is returned are the :math:`c_i` such that: .. math:: \\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i]) where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Legendre (basis) polynomial over the domain `[-1,1]`. Note that, unlike `polyfromroots`, due to the nature of the Legendre basis set, the above identity *does not* imply :math:`c_n = 1` identically (see Examples). Examples -------- >>> import numpy.polynomial.legendre as L >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis array([ 0. , -0.4, 0. 
, 0.4]) >>> j = complex(0,1) >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) """ if len(roots) == 0 : return np.ones(1) else : [roots] = pu.as_series([roots], trim=False) prd = np.array([1], dtype=roots.dtype) for r in roots: prd = legsub(legmulx(prd), r*prd) return prd def legadd(c1, c2): """ Add one Legendre series to another. Returns the sum of two Legendre series `c1` + `c2`. The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-d arrays of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Legendre series of their sum. See Also -------- legsub, legmul, legdiv, legpow Notes ----- Unlike multiplication, division, etc., the sum of two Legendre series is a Legendre series (without having to "reproject" the result onto the basis set) so addition, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legadd(c1,c2) array([ 4., 4., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2) : c1[:c2.size] += c2 ret = c1 else : c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def legsub(c1, c2): """ Subtract one Legendre series from another. Returns the difference of two Legendre series `c1` - `c2`. The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-d arrays of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Of Legendre series coefficients representing their difference. See Also -------- legadd, legmul, legdiv, legpow Notes ----- Unlike multiplication, division, etc., the difference of two Legendre series is a Legendre series (without having to "reproject" the result onto the basis set) so subtraction, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legsub(c1,c2) array([-2., 0., 2.]) >>> L.legsub(c2,c1) # -C.legsub(c1,c2) array([ 2., 0., -2.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2) : c1[:c2.size] -= c2 ret = c1 else : c2 = -c2 c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def legmulx(cs): """Multiply a Legendre series by x. Multiply the Legendre series `cs` by x, where x is the independent variable. Parameters ---------- cs : array_like 1-d array of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. Notes ----- The multiplication uses the recursion relationship for Legendre polynomials in the form .. math:: xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) """ # cs is a trimmed copy [cs] = pu.as_series([cs]) # The zero series needs special treatment if len(cs) == 1 and cs[0] == 0: return cs prd = np.empty(len(cs) + 1, dtype=cs.dtype) prd[0] = cs[0]*0 prd[1] = cs[0] for i in range(1, len(cs)): j = i + 1 k = i - 1 s = i + j prd[j] = (cs[i]*j)/s prd[k] += (cs[i]*i)/s return prd def legmul(c1, c2): """ Multiply one Legendre series by another. Returns the product of two Legendre series `c1` * `c2`. 
The arguments are sequences of coefficients, from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-d arrays of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Of Legendre series coefficients representing their product. See Also -------- legadd, legsub, legdiv, legpow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Legendre polynomial basis set. Thus, to express the product as a Legendre series, it is necessary to "re-project" the product onto said basis set, which may produce "un-intuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2) >>> P.legmul(c1,c2) # multiplication requires "reprojection" array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) """ # s1, s2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): cs = c2 xs = c1 else: cs = c1 xs = c2 if len(cs) == 1: c0 = cs[0]*xs c1 = 0 elif len(cs) == 2: c0 = cs[0]*xs c1 = cs[1]*xs else : nd = len(cs) c0 = cs[-2]*xs c1 = cs[-1]*xs for i in range(3, len(cs) + 1) : tmp = c0 nd = nd - 1 c0 = legsub(cs[-i]*xs, (c1*(nd - 1))/nd) c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) return legadd(c0, legmulx(c1)) def legdiv(c1, c2): """ Divide one Legendre series by another. Returns the quotient-with-remainder of two Legendre series `c1` / `c2`. The arguments are sequences of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Legendre series coefficients ordered from low to high. Returns ------- quo, rem : ndarrays Of Legendre series coefficients representing the quotient and remainder. See Also -------- legadd, legsub, legmul, legpow Notes ----- In general, the (polynomial) division of one Legendre series by another results in quotient and remainder terms that are not in the Legendre polynomial basis set. Thus, to express these results as a Legendre series, it is necessary to "re-project" the results onto the Legendre basis set, which may produce "un-intuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not (array([ 3.]), array([-8., -4.])) >>> c2 = (0,1,2,3) >>> L.legdiv(c2,c1) # neither "intuitive" (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0 : raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) if lc1 < lc2 : return c1[:1]*0, c1 elif lc2 == 1 : return c1/c2[-1], c1[:1]*0 else : quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): p = legmul([0]*i + [1], c2) q = rem[-1]/p[-1] rem = rem[:-1] - q*p[:-1] quo[i] = q return quo, pu.trimseq(rem) def legpow(cs, pow, maxpower=16) : """Raise a Legendre series to a power. Returns the Legendre series `cs` raised to the power `pow`. The arguement `cs` is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` Parameters ---------- cs : array_like 1d array of Legendre series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. 
This is mainly to limit growth of the series to umanageable size. Default is 16 Returns ------- coef : ndarray Legendre series of power. See Also -------- legadd, legsub, legmul, legdiv Examples -------- """ # cs is a trimmed copy [cs] = pu.as_series([cs]) power = int(pow) if power != pow or power < 0 : raise ValueError("Power must be a non-negative integer.") elif maxpower is not None and power > maxpower : raise ValueError("Power is too large") elif power == 0 : return np.array([1], dtype=cs.dtype) elif power == 1 : return cs else : # This can be made more efficient by using powers of two # in the usual way. prd = cs for i in range(2, power + 1) : prd = legmul(prd, cs) return prd def legder(cs, m=1, scl=1) : """ Differentiate a Legendre series. Returns the series `cs` differentiated `m` times. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `cs` is the sequence of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- cs : array_like 1-D array of Legendre series coefficients ordered from low to high. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) Returns ------- der : ndarray Legendre series of the derivative. See Also -------- legint Notes ----- In general, the result of differentiating a Legendre series does not resemble the same operation on a power series. Thus the result of this function may be "un-intuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial import legendre as L >>> cs = (1,2,3,4) >>> L.legder(cs) array([ 6., 9., 20.]) >>> L.legder(cs,3) array([ 60.]) >>> L.legder(cs,scl=-1) array([ -6., -9., -20.]) >>> L.legder(cs,2,-1) array([ 9., 60.]) """ cnt = int(m) if cnt != m: raise ValueError, "The order of derivation must be integer" if cnt < 0 : raise ValueError, "The order of derivation must be non-negative" # cs is a trimmed copy [cs] = pu.as_series([cs]) if cnt == 0: return cs elif cnt >= len(cs): return cs[:1]*0 else : for i in range(cnt): n = len(cs) - 1 cs *= scl der = np.empty(n, dtype=cs.dtype) for j in range(n, 0, -1): der[j - 1] = (2*j - 1)*cs[j] cs[j - 2] += cs[j] cs = der return cs def legint(cs, m=1, k=[], lbnd=0, scl=1): """ Integrate a Legendre series. Returns a Legendre series that is the Legendre series `cs`, integrated `m` times from `lbnd` to `x`. At each iteration the resulting series is **multiplied** by `scl` and an integration constant, `k`, is added. The scaling factor is for use in a linear change of variable. ("Buyer beware": note that, depending on what one is doing, one may want `scl` to be the reciprocal of what one might expect; for more information, see the Notes section below.) The argument `cs` is a sequence of coefficients, from lowest order Legendre series "term" to highest, e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`. Parameters ---------- cs : array_like 1-d array of Legendre series coefficients, ordered from low to high. m : int, optional Order of integration, must be positive. (Default: 1) k : {[], list, scalar}, optional Integration constant(s). The value of the first integral at ``lbnd`` is the first value in the list, the value of the second integral at ``lbnd`` is the second value, etc. 
If ``k == []`` (the default), all constants are set to zero. If ``m == 1``, a single scalar can be given instead of a list. lbnd : scalar, optional The lower bound of the integral. (Default: 0) scl : scalar, optional Following each integration the result is *multiplied* by `scl` before the integration constant is added. (Default: 1) Returns ------- S : ndarray Legendre series coefficients of the integral. Raises ------ ValueError If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or ``np.isscalar(scl) == False``. See Also -------- legder Notes ----- Note that the result of each integration is *multiplied* by `scl`. Why is this important to note? Say one is making a linear change of variable :math:`u = ax + b` in an integral relative to `x`. Then :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - perhaps not what one would have first thought. Also note that, in general, the result of integrating a C-series needs to be "re-projected" onto the C-series basis set. Thus, typically, the result of this function is "un-intuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial import legendre as L >>> cs = (1,2,3) >>> L.legint(cs) array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) >>> L.legint(cs,3) array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) >>> L.legint(cs, k=3) array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) >>> L.legint(cs, lbnd=-2) array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) >>> L.legint(cs, scl=2) array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) """ cnt = int(m) if np.isscalar(k) : k = [k] if cnt != m: raise ValueError, "The order of integration must be integer" if cnt < 0 : raise ValueError, "The order of integration must be non-negative" if len(k) > cnt : raise ValueError, "Too many integration constants" # cs is a trimmed copy [cs] = pu.as_series([cs]) if cnt == 0: return cs k = list(k) + [0]*(cnt - len(k)) for i in range(cnt) : n = len(cs) cs *= scl if n == 1 and cs[0] == 0: cs[0] += k[i] else: tmp = np.empty(n + 1, dtype=cs.dtype) tmp[0] = cs[0]*0 tmp[1] = cs[0] for j in range(1, n): t = cs[j]/(2*j + 1) tmp[j + 1] = t tmp[j - 1] -= t tmp[0] += k[i] - legval(lbnd, tmp) cs = tmp return cs def legval(x, cs): """Evaluate a Legendre series. If `cs` is of length `n`, this function returns : ``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)`` If x is a sequence or array then p(x) will have the same shape as x. If r is a ring_like object that supports multiplication and addition by the values in `cs`, then an object of the same type is returned. Parameters ---------- x : array_like, ring_like Array of numbers or objects that support multiplication and addition with themselves and with the elements of `cs`. cs : array_like 1-d array of Legendre coefficients ordered from low to high. Returns ------- values : ndarray, ring_like If the return is an ndarray then it has the same shape as `x`. See Also -------- legfit Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. Examples -------- """ # cs is a trimmed copy [cs] = pu.as_series([cs]) if isinstance(x, tuple) or isinstance(x, list) : x = np.asarray(x) if len(cs) == 1 : c0 = cs[0] c1 = 0 elif len(cs) == 2 : c0 = cs[0] c1 = cs[1] else : nd = len(cs) c0 = cs[-2] c1 = cs[-1] for i in range(3, len(cs) + 1) : tmp = c0 nd = nd - 1 c0 = cs[-i] - (c1*(nd - 1))/nd c1 = tmp + (c1*x*(2*nd - 1))/nd return c0 + c1*x def legvander(x, deg) : """Vandermonde matrix of given degree. 
Returns the Vandermonde matrix of degree `deg` and sample points `x`. This isn't a true Vandermonde matrix because `x` can be an arbitrary ndarray and the Legendre polynomials aren't powers. If ``V`` is the returned matrix and `x` is a 2d array, then the elements of ``V`` are ``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Legendre polynomial of degree ``k``. Parameters ---------- x : array_like Array of points. The values are converted to double or complex doubles. If x is scalar it is converted to a 1D array. deg : integer Degree of the resulting matrix. Returns ------- vander : Vandermonde matrix. The shape of the returned matrix is ``x.shape + (deg+1,)``. The last index is the degree. """ ideg = int(deg) if ideg != deg: raise ValueError("deg must be integer") if ideg < 0: raise ValueError("deg must be non-negative") x = np.array(x, copy=0, ndmin=1) + 0.0 v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype) # Use forward recursion to generate the entries. This is not as accurate # as reverse recursion in this application but it is more efficient. v[0] = x*0 + 1 if ideg > 0 : v[1] = x for i in range(2, ideg + 1) : v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i return np.rollaxis(v, 0, v.ndim) def legfit(x, y, deg, rcond=None, full=False, w=None): """ Least squares fit of Legendre series to data. Fit a Legendre series ``p(x) = p[0] * P_{0}(x) + ... + p[deg] * P_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points ``(x[i], y[i])``. y : array_like, shape (M,) or (M, K) y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. deg : int Degree of the fitting polynomial rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The default value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : bool, optional Switch determining nature of return value. When it is False (the default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. Returns ------- coef : ndarray, shape (M,) or (M, K) Legendre coefficients ordered from low to high. If `y` was 2-D, the coefficients for the data in column k of `y` are in column `k`. [residuals, rank, singular_values, rcond] : present when `full` = True Residuals of the least-squares fit, the effective rank of the scaled Vandermonde matrix and its singular values, and the specified value of `rcond`. For more details, see `linalg.lstsq`. Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is only raised if `full` = False. The warnings can be turned off by >>> import warnings >>> warnings.simplefilter('ignore', RankWarning) See Also -------- legval : Evaluates a Legendre series. legvander : Vandermonde matrix of Legendre series. polyfit : least squares fit using polynomials. chebfit : least squares fit using Chebyshev series. 
linalg.lstsq : Computes a least-squares fit from the matrix. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution are the coefficients ``c[i]`` of the Legendre series ``P(x)`` that minimizes the squared error ``E = \\sum_j |y_j - P(x_j)|^2``. This problem is solved by setting up as the overdetermined matrix equation ``V(x)*c = y``, where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are the coefficients to be solved for, and the elements of `y` are the observed values. This equation is then solved using the singular value decomposition of ``V``. If some of the singular values of ``V`` are so small that they are neglected, then a `RankWarning` will be issued. This means that the coeficient values may be poorly determined. Using a lower order fit will usually get rid of the warning. The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious and have large contributions from roundoff error. Fits using Legendre series are usually better conditioned than fits using power series, but much can depend on the distribution of the sample points and the smoothness of the data. If the quality of the fit is inadequate splines may be a good alternative. References ---------- .. [1] Wikipedia, "Curve fitting", http://en.wikipedia.org/wiki/Curve_fitting Examples -------- """ order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 # check arguments. if deg < 0 : raise ValueError, "expected deg >= 0" if x.ndim != 1: raise TypeError, "expected 1D vector for x" if x.size == 0: raise TypeError, "expected non-empty vector for x" if y.ndim < 1 or y.ndim > 2 : raise TypeError, "expected 1D or 2D array for y" if len(x) != len(y): raise TypeError, "expected x and y to have same length" # set up the least squares matrices lhs = legvander(x, deg) rhs = y if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: raise TypeError, "expected 1D vector for w" if len(x) != len(w): raise TypeError, "expected x and w to have same length" # apply weights if rhs.ndim == 2: lhs *= w[:, np.newaxis] rhs *= w[:, np.newaxis] else: lhs *= w[:, np.newaxis] rhs *= w # set rcond if rcond is None : rcond = len(x)*np.finfo(x.dtype).eps # scale the design matrix and solve the least squares equation scl = np.sqrt((lhs*lhs).sum(0)) c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond) c = (c.T/scl).T # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning) if full : return c, [resids, rank, s, rcond] else : return c def legroots(cs): """ Compute the roots of a Legendre series. Return the roots (a.k.a "zeros") of the Legendre series represented by `cs`, which is the sequence of coefficients from lowest order "term" to highest, e.g., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2``. Parameters ---------- cs : array_like 1-d array of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Array of the roots. If all the roots are real, then so is the dtype of ``out``; otherwise, ``out``'s dtype is complex. See Also -------- polyroots chebroots Notes ----- Algorithm(s) used: Remember: because the Legendre series basis set is different from the "standard" basis set, the results of this function *may* not be what one is expecting. 
Examples -------- >>> import numpy.polynomial as P >>> P.polyroots((1, 2, 3, 4)) # 4x^3 + 3x^2 + 2x + 1 has two complex roots array([-0.60582959+0.j , -0.07208521-0.63832674j, -0.07208521+0.63832674j]) >>> P.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0 has only real roots array([-0.85099543, -0.11407192, 0.51506735]) """ # cs is a trimmed copy [cs] = pu.as_series([cs]) if len(cs) <= 1 : return np.array([], dtype=cs.dtype) if len(cs) == 2 : return np.array([-cs[0]/cs[1]]) n = len(cs) - 1 cs /= cs[-1] cmat = np.zeros((n,n), dtype=cs.dtype) cmat[1, 0] = 1 for i in range(1, n): tmp = 2*i + 1 cmat[i - 1, i] = i/tmp if i != n - 1: cmat[i + 1, i] = (i + 1)/tmp else: cmat[:, i] -= cs[:-1]*(i + 1)/tmp roots = la.eigvals(cmat) roots.sort() return roots # # Legendre series class # exec polytemplate.substitute(name='Legendre', nick='leg', domain='[-1,1]')
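The Examples section of `legfit` above is left empty in the source. A minimal usage sketch follows, assuming the module is consumed through `numpy.polynomial.legendre`; the sample data, degree, and variable names are illustrative and not part of the original file.

import numpy as np
from numpy.polynomial import legendre as leg

# Synthetic data: a cubic sampled on [-1, 1].
x = np.linspace(-1, 1, 51)
y = x**3 - 0.5*x + 0.1

# legfit returns Legendre coefficients ordered from low to high degree.
coef = leg.legfit(x, y, deg=3)

# legval evaluates the fitted series; the residual should be near rounding
# error because the data is exactly a degree-3 polynomial.
max_err = np.abs(leg.legval(x, coef) - y).max()

# legvander builds the design matrix used internally by legfit:
# column k of V holds P_k(x), so V has shape (51, 4) here.
V = leg.legvander(x, 3)
print(coef, max_err, V.shape)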
apache-2.0
holmes/intellij-community
python/lib/Lib/readline.py
82
5885
from __future__ import with_statement import os.path import sys from warnings import warn import java.lang.reflect.Array __all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer', 'get_completer_delims', 'get_current_history_length', 'get_endidx', 'get_history_item', 'get_history_length', 'get_line_buffer', 'insert_text', 'parse_and_bind', 'read_history_file', 'read_init_file', 'redisplay', 'remove_history_item', 'set_completer', 'set_completer_delims', 'set_history_length', 'set_pre_input_hook', 'set_startup_hook', 'write_history_file'] try: _reader = sys._jy_interpreter.reader except AttributeError: raise ImportError("Cannot access JLineConsole") _history_list = None # The need for the following warnings should go away once we update # JLine. Choosing ImportWarning as the closest warning to what is # going on here, namely this is functionality not yet available on # Jython. class NotImplementedWarning(ImportWarning): """Not yet implemented by Jython""" class SecurityWarning(ImportWarning): """Security manager prevents access to private field""" def _setup_history(): # This is obviously not desirable, but avoids O(n) workarounds to # modify the history (ipython uses the function # remove_history_item to mutate the history relatively frequently) global _history_list history = _reader.history try: history_list_field = history.class.getDeclaredField("history") history_list_field.setAccessible(True) _history_list = history_list_field.get(history) except: pass _setup_history() def parse_and_bind(string): if string == "tab: complete": try: keybindings_field = _reader.class.getDeclaredField("keybindings") keybindings_field.setAccessible(True) keybindings = keybindings_field.get(_reader) COMPLETE = _reader.KEYMAP_NAMES.get('COMPLETE') if java.lang.reflect.Array.getShort(keybindings, 9) != COMPLETE: java.lang.reflect.Array.setShort(keybindings, 9, COMPLETE) except: warn("Cannot bind tab key to complete. You need to do this in a .jlinebindings.properties file instead", SecurityWarning, stacklevel=2) else: warn("Cannot bind key %s. 
You need to do this in a .jlinebindings.properties file instead" % (string,), NotImplementedWarning, stacklevel=2) def get_line_buffer(): return str(_reader.cursorBuffer.buffer) def insert_text(string): _reader.putString(string) def read_init_file(filename=None): warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2) def read_history_file(filename="~/.history"): print "Reading history:", filename expanded = os.path.expanduser(filename) new_history = _reader.getHistory().getClass()() # new_history.clear() with open(expanded) as f: for line in f: new_history.addToHistory(line.rstrip()) _reader.history = new_history _setup_history() def write_history_file(filename="~/.history"): expanded = os.path.expanduser(filename) with open(expanded, 'w') as f: for line in _reader.history.historyList: f.write(line) f.write("\n") def clear_history(): _reader.history.clear() def add_history(line): _reader.addToHistory(line) def get_history_length(): return _reader.history.maxSize def set_history_length(length): _reader.history.maxSize = length def get_current_history_length(): return len(_reader.history.historyList) def get_history_item(index): return _reader.history.historyList[index] def remove_history_item(pos): if _history_list: _history_list.remove(pos) else: warn("Cannot remove history item at position: %s" % (pos,), SecurityWarning, stacklevel=2) def redisplay(): _reader.redrawLine() def set_startup_hook(function=None): sys._jy_interpreter.startupHook = function def set_pre_input_hook(function=None): warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2) _completer_function = None def set_completer(function=None): """set_completer([function]) -> None Set or remove the completer function. The function is called as function(text, state), for state in 0, 1, 2, ..., until it returns a non-string. It should return the next possible completion starting with 'text'.""" global _completer_function _completer_function = function def complete_handler(buffer, cursor, candidates): start = _get_delimited(buffer, cursor)[0] delimited = buffer[start:cursor] for state in xrange(100): # TODO arbitrary, what's the number used by gnu readline? completion = None try: completion = function(delimited, state) except: pass if completion: candidates.add(completion) else: break return start _reader.addCompletor(complete_handler) def get_completer(): return _completer_function def _get_delimited(buffer, cursor): start = cursor for i in xrange(cursor-1, -1, -1): if buffer[i] in _completer_delims: break start = i return start, cursor def get_begidx(): return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0] def get_endidx(): return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1] def set_completer_delims(string): global _completer_delims, _completer_delims_set _completer_delims = string _completer_delims_set = set(string) def get_completer_delims(): return _completer_delims set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
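The `set_completer` docstring above describes the completer protocol: the callback is invoked as function(text, state) for state = 0, 1, 2, ... and returns successive candidate strings, then a non-string to stop. A minimal caller-side sketch follows; only `parse_and_bind` and `set_completer` come from the module above, while the word list and completer body are illustrative.

import readline

_WORDS = ["import", "print", "class", "def", "return"]

def completer(text, state):
    # Return the state-th word starting with `text`, or None when exhausted.
    matches = [w for w in _WORDS if w.startswith(text)]
    if state < len(matches):
        return matches[state]
    return None

readline.parse_and_bind("tab: complete")  # the only binding string handled above
readline.set_completer(completer)         # wired to JLine via addCompletor internally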
apache-2.0
ghchinoy/tensorflow
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
2
80753
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for GBDT train function.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from google.protobuf import text_format from tensorflow.contrib import layers from tensorflow.contrib import learn from tensorflow.contrib.boosted_trees.proto import learner_pb2 from tensorflow.contrib.boosted_trees.proto import tree_config_pb2 from tensorflow.contrib.boosted_trees.python.ops import model_ops from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch from tensorflow.contrib.boosted_trees.python.utils import losses from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.python.feature_column import feature_column_lib as core_feature_column from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resources from tensorflow.python.ops import variables from tensorflow.python.platform import googletest def _squared_loss(label, unused_weights, predictions): """Unweighted loss implementation.""" loss = math_ops.reduce_sum( math_ops.squared_difference(predictions, label), 1, keepdims=True) return loss def _append_to_leaf(leaf, c_id, w): """Helper method for building tree leaves. Appends weight contributions for the given class index to a leaf node. Args: leaf: leaf node to append to. c_id: class Id for the weight update. w: weight contribution value. """ leaf.sparse_vector.index.append(c_id) leaf.sparse_vector.value.append(w) def _set_float_split(split, feat_col, thresh, l_id, r_id): """Helper method for building tree float splits. Sets split feature column, threshold and children. Args: split: split node to update. feat_col: feature column for the split. thresh: threshold to split on forming rule x <= thresh. l_id: left child Id. r_id: right child Id. 
""" split.feature_column = feat_col split.threshold = thresh split.left_id = l_id split.right_id = r_id class GbdtTest(test_util.TensorFlowTestCase): def setUp(self): super(GbdtTest, self).setUp() def testExtractFeatures(self): """Tests feature extraction.""" with self.cached_session(): features = {} features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32) features["sparse_float"] = sparse_tensor.SparseTensor( array_ops.zeros([2, 2], dtypes.int64), array_ops.zeros([2], dtypes.float32), array_ops.zeros([2], dtypes.int64)) features["sparse_int"] = sparse_tensor.SparseTensor( array_ops.zeros([2, 2], dtypes.int64), array_ops.zeros([2], dtypes.int64), array_ops.zeros([2], dtypes.int64)) (fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes) = ( gbdt_batch.extract_features(features, None, use_core_columns=False)) self.assertEqual(len(fc_names), 3) self.assertAllEqual(fc_names, ["dense_float", "sparse_float", "sparse_int"]) self.assertEqual(len(dense_floats), 1) self.assertEqual(len(sparse_float_indices), 1) self.assertEqual(len(sparse_float_values), 1) self.assertEqual(len(sparse_float_shapes), 1) self.assertEqual(len(sparse_int_indices), 1) self.assertEqual(len(sparse_int_values), 1) self.assertEqual(len(sparse_int_shapes), 1) self.assertAllEqual(dense_floats[0].eval(), features["dense_float"].eval()) self.assertAllEqual(sparse_float_indices[0].eval(), features["sparse_float"].indices.eval()) self.assertAllEqual(sparse_float_values[0].eval(), features["sparse_float"].values.eval()) self.assertAllEqual(sparse_float_shapes[0].eval(), features["sparse_float"].dense_shape.eval()) self.assertAllEqual(sparse_int_indices[0].eval(), features["sparse_int"].indices.eval()) self.assertAllEqual(sparse_int_values[0].eval(), features["sparse_int"].values.eval()) self.assertAllEqual(sparse_int_shapes[0].eval(), features["sparse_int"].dense_shape.eval()) def testExtractFeaturesWithTransformation(self): """Tests feature extraction.""" with self.cached_session(): features = {} features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32) features["sparse_float"] = sparse_tensor.SparseTensor( array_ops.zeros([2, 2], dtypes.int64), array_ops.zeros([2], dtypes.float32), array_ops.zeros([2], dtypes.int64)) features["sparse_categorical"] = sparse_tensor.SparseTensor( array_ops.zeros([2, 2], dtypes.int64), array_ops.zeros([2], dtypes.string), array_ops.zeros([2], dtypes.int64)) feature_columns = set() feature_columns.add(layers.real_valued_column("dense_float")) feature_columns.add( layers.feature_column._real_valued_var_len_column( "sparse_float", is_sparse=True)) feature_columns.add( feature_column_lib.sparse_column_with_hash_bucket( "sparse_categorical", hash_bucket_size=1000000)) (fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes) = ( gbdt_batch.extract_features( features, feature_columns, use_core_columns=False)) self.assertEqual(len(fc_names), 3) self.assertAllEqual(fc_names, ["dense_float", "sparse_float", "sparse_categorical"]) self.assertEqual(len(dense_floats), 1) self.assertEqual(len(sparse_float_indices), 1) self.assertEqual(len(sparse_float_values), 1) self.assertEqual(len(sparse_float_shapes), 1) self.assertEqual(len(sparse_int_indices), 1) self.assertEqual(len(sparse_int_values), 1) self.assertEqual(len(sparse_int_shapes), 1) self.assertAllEqual(dense_floats[0].eval(), features["dense_float"].eval()) 
self.assertAllEqual(sparse_float_indices[0].eval(), features["sparse_float"].indices.eval()) self.assertAllEqual(sparse_float_values[0].eval(), features["sparse_float"].values.eval()) self.assertAllEqual(sparse_float_shapes[0].eval(), features["sparse_float"].dense_shape.eval()) self.assertAllEqual(sparse_int_indices[0].eval(), features["sparse_categorical"].indices.eval()) self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263]) self.assertAllEqual(sparse_int_shapes[0].eval(), features["sparse_categorical"].dense_shape.eval()) def testExtractFeaturesFromCoreFeatureColumns(self): """Tests feature extraction when using core columns.""" with self.cached_session(): features = {} # Sparse float column does not exist in core, so only dense numeric and # categorical. features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32) features["sparse_categorical"] = sparse_tensor.SparseTensor( array_ops.zeros([2, 2], dtypes.int64), array_ops.zeros([2], dtypes.string), array_ops.zeros([2], dtypes.int64)) feature_columns = set() feature_columns.add(core_feature_column.numeric_column("dense_float")) feature_columns.add( core_feature_column.categorical_column_with_hash_bucket( "sparse_categorical", hash_bucket_size=1000000)) (fc_names, dense_floats, _, _, _, sparse_int_indices, sparse_int_values, sparse_int_shapes) = ( gbdt_batch.extract_features( features, feature_columns, use_core_columns=True)) self.assertEqual(len(fc_names), 2) self.assertAllEqual(fc_names, ["dense_float", "sparse_categorical"]) self.assertEqual(len(dense_floats), 1) self.assertEqual(len(sparse_int_indices), 1) self.assertEqual(len(sparse_int_values), 1) self.assertEqual(len(sparse_int_shapes), 1) self.assertAllEqual(dense_floats[0].eval(), features["dense_float"].eval()) self.assertAllEqual(sparse_int_indices[0].eval(), features["sparse_categorical"].indices.eval()) self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263]) self.assertAllEqual(sparse_int_shapes[0].eval(), features["sparse_categorical"].dense_shape.eval()) def testTrainFnChiefNoBiasCentering(self): """Tests the train function running on chief without bias centering.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. 
train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) # On second run, expect a trivial split to be chosen to basically # predict the average. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [0.1]) self.assertEquals(stamp_token.eval(), 2) expected_tree = """ nodes { dense_float_binary_split { threshold: 1.0 left_id: 1 right_id: 2 } node_metadata { gain: 0 } } nodes { leaf { vector { value: 0.25 } } } nodes { leaf { vector { value: 0.0 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) def testObliviousDecisionTreeAsWeakLearner(self): with self.cached_session(): ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 learner_config.learning_rate_tuner.fixed.learning_rate = 1 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 2 learner_config.constraints.min_node_weight = 0 learner_config.weak_learner_type = ( learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE) learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER features = {} features["dense_float"] = array_ops.constant([[-2], [-1], [1], [2]], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions_dict = gbdt_model.predict(learn.ModeKeys.TRAIN) predictions = predictions_dict["predictions"] labels = array_ops.constant([[-2], [-1], [1], [2]], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Second run. 
train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [1]) self.assertEquals(stamp_token.eval(), 2) expected_tree = """ nodes { oblivious_dense_float_binary_split { threshold: -1.0 } node_metadata { gain: 4.5 original_oblivious_leaves { } } } nodes { leaf { vector { value: -1.5 } } } nodes { leaf { vector { value: 1.5 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) # Third run. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [1]) self.assertEquals(stamp_token.eval(), 3) expected_tree = """ nodes { oblivious_dense_float_binary_split { threshold: -1.0 } node_metadata { gain: 4.5 original_oblivious_leaves { } } } nodes { oblivious_dense_float_binary_split { threshold: -2.0 } node_metadata { gain: 0.25 original_oblivious_leaves { vector { value: -1.5 } } original_oblivious_leaves { vector { value: 1.5 } } } } nodes { leaf { vector { value: -2.0 } } } nodes { leaf { vector { value: -1.0 } } } nodes { leaf { vector { value: 1.5 } } } nodes { leaf { vector { value: 1.5 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) def testTrainFnChiefSparseAndDense(self): """Tests the train function with sparse and dense features.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) features["sparse_float"] = sparse_tensor.SparseTensor( array_ops.zeros([2, 2], dtypes.int64), array_ops.zeros([2], dtypes.float32), array_ops.constant([4, 1], dtypes.int64)) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. 
train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [0.1]) self.assertEquals(stamp_token.eval(), 2) expected_tree = """ nodes { sparse_float_binary_split_default_right { split{ left_id: 1 right_id: 2 } } node_metadata { gain: 1.125 } } nodes { leaf { vector { value: 1.0 } } } nodes { leaf { vector { value: -0.5 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) def testTrainFnChiefScalingNumberOfExamples(self): """Tests the train function running on chief without bias centering.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 num_examples_fn = ( lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1) features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=num_examples_fn, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) # On second run, expect a trivial split to be chosen to basically # predict the average. 
train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [0.1]) self.assertEquals(stamp_token.eval(), 2) expected_tree = """ nodes { dense_float_binary_split { threshold: 1.0 left_id: 1 right_id: 2 } node_metadata { gain: 0 } } nodes { leaf { vector { value: 0.25 } } } nodes { leaf { vector { value: 0.0 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) def testTrainFnChiefWithBiasCentering(self): """Tests the train function running on chief with bias centering.""" with self.cached_session(): ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=True, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect bias to be centered. 
train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) expected_tree = """ nodes { leaf { vector { value: 0.25 } } }""" self.assertEquals(len(output.trees), 1) self.assertAllEqual(output.tree_weights, [1.0]) self.assertProtoEquals(expected_tree, output.trees[0]) self.assertEquals(stamp_token.eval(), 1) def testTrainFnNonChiefNoBiasCentering(self): """Tests the train function running on worker without bias centering.""" with self.cached_session(): ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=False, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # Regardless of how many times the train op is run, a non-chief worker # can only accumulate stats so the tree ensemble never changes. 
for _ in range(5): train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 0) def testTrainFnNonChiefWithCentering(self): """Tests the train function running on worker with bias centering.""" with self.cached_session(): ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=False, num_ps_replicas=0, center_bias=True, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # Regardless of how many times the train op is run, a non-chief worker # can only accumulate stats so the tree ensemble never changes. for _ in range(5): train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 0) def testPredictFn(self): """Tests the predict function.""" with self.cached_session() as sess: # Create ensemble with one bias node. 
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() text_format.Merge( """ trees { nodes { leaf { vector { value: 0.25 } } } } tree_weights: 1.0 tree_metadata { num_tree_weight_updates: 1 num_layers_grown: 1 is_finalized: true }""", ensemble_config) ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=3, tree_ensemble_config=ensemble_config.SerializeToString(), name="tree_ensemble") resources.initialize_resources(resources.shared_resources()).run() learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=False, num_ps_replicas=0, center_bias=True, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) # Create predict op. mode = model_fn.ModeKeys.EVAL predictions_dict = sess.run(gbdt_model.predict(mode)) self.assertEquals(predictions_dict["ensemble_stamp"], 3) self.assertAllClose(predictions_dict["predictions"], [[0.25], [0.25], [0.25], [0.25]]) self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0]) def testPredictFnWithLeafIndexAdvancedLeft(self): """Tests the predict function with output leaf ids.""" with self.cached_session() as sess: # Create ensemble with one bias node. ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() text_format.Merge( """ trees { nodes { dense_float_binary_split { threshold: 1.0 left_id: 1 right_id: 2 } node_metadata { gain: 0 } } nodes { leaf { vector { value: 0.25 } } } nodes { leaf { vector { value: 0.15 } } } } trees { nodes { dense_float_binary_split { threshold: 0.99 left_id: 1 right_id: 2 } node_metadata { gain: 00 } } nodes { leaf { vector { value: 0.25 } } } nodes { leaf { vector { value: 0.23 } } } } tree_weights: 1.0 tree_weights: 1.0 tree_metadata { num_tree_weight_updates: 1 num_layers_grown: 1 is_finalized: true } tree_metadata { num_tree_weight_updates: 1 num_layers_grown: 1 is_finalized: true }""", ensemble_config) ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=3, tree_ensemble_config=ensemble_config.SerializeToString(), name="tree_ensemble") resources.initialize_resources(resources.shared_resources()).run() learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.constant( [[0.0], [1.0], [1.1], [2.0]], dtype=dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=False, num_ps_replicas=0, center_bias=True, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features, output_leaf_index=True) # Create predict op. 
mode = model_fn.ModeKeys.INFER predictions_dict = sess.run(gbdt_model.predict(mode)) self.assertEquals(predictions_dict["ensemble_stamp"], 3) # here are how the numbers in expected results are calculated, # 0.5 = 0.25 + 0.25 # 0.48 = 0.25 + 0.23 # 0.38 = 0.15 + 0.23 # 0.38 = 0.15 + 0.23 self.assertAllClose(predictions_dict["predictions"], [[0.5], [0.48], [0.38], [0.38]]) self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0]) self.assertAllClose(predictions_dict["leaf_index"], [[1, 1], [1, 2], [2, 2], [2, 2]]) def testTrainFnMulticlassFullHessian(self): """Tests the GBDT train for multiclass full hessian.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 1 # Use full hessian multiclass strategy. learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.FULL_HESSIAN) learner_config.num_classes = 5 learner_config.regularization.l1 = 0 # To make matrix inversible. learner_config.regularization.l2 = 1e-5 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} batch_size = 3 features["dense_float"] = array_ops.constant( [0.3, 1.5, 1.1], dtype=dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=5, features=features) predictions = array_ops.constant( [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0], [0.0, 0.0, 0.0, 0.0, 1.2]], dtype=dtypes.float32) labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32) weights = array_ops.ones([batch_size, 1], dtypes.float32) partition_ids = array_ops.zeros([batch_size], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 0, } # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( losses.per_example_maxent_loss( labels, weights, predictions, num_classes=learner_config.num_classes)[0]), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) # On second run, expect a trivial split to be chosen to basically # predict the average. train_op.run() output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output.ParseFromString(serialized.eval()) self.assertEqual(len(output.trees), 1) # We got 3 nodes: one parent and 2 leafs. 
self.assertEqual(len(output.trees[0].nodes), 3) self.assertAllClose(output.tree_weights, [1]) self.assertEquals(stamp_token.eval(), 2) # Leafs should have a dense vector of size 5. expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508] expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264] self.assertArrayNear(expected_leaf_1, output.trees[0].nodes[1].leaf.vector.value, 7e-3) self.assertArrayNear(expected_leaf_2, output.trees[0].nodes[2].leaf.vector.value, 7e-3) def testTrainFnMulticlassDiagonalHessian(self): """Tests the GBDT train for multiclass diagonal hessian.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 1 # Use full hessian multiclass strategy. learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.DIAGONAL_HESSIAN) learner_config.num_classes = 5 learner_config.regularization.l1 = 0 # To make matrix inversible. learner_config.regularization.l2 = 1e-5 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 batch_size = 3 features = {} features["dense_float"] = array_ops.constant( [0.3, 1.5, 1.1], dtype=dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=5, features=features) predictions = array_ops.constant( [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0], [0.0, 0.0, 0.0, 0.0, 1.2]], dtype=dtypes.float32) labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32) weights = array_ops.ones([batch_size, 1], dtypes.float32) partition_ids = array_ops.zeros([batch_size], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 0, } # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( losses.per_example_maxent_loss( labels, weights, predictions, num_classes=learner_config.num_classes)[0]), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEqual(len(output.trees), 0) self.assertEqual(len(output.tree_weights), 0) self.assertEqual(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) # On second run, expect a trivial split to be chosen to basically # predict the average. train_op.run() output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output.ParseFromString(serialized.eval()) self.assertEqual(len(output.trees), 1) # We got 3 nodes: one parent and 2 leafs. 
self.assertEqual(len(output.trees[0].nodes), 3) self.assertAllClose(output.tree_weights, [1]) self.assertEqual(stamp_token.eval(), 2) # Leafs should have a dense vector of size 5. expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023] expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269] self.assertArrayNear(expected_leaf_1, output.trees[0].nodes[1].leaf.vector.value, 1e-3) self.assertArrayNear(expected_leaf_2, output.trees[0].nodes[2].leaf.vector.value, 1e-3) def testTrainFnMulticlassDiagonalHessianOblivious(self): """Tests the GBDT train for multiclass diagonal hessian.""" with self.cached_session(): ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 1 # Use full hessian multiclass strategy. learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.DIAGONAL_HESSIAN) learner_config.num_classes = 5 learner_config.regularization.l1 = 0 # To make matrix inversible. learner_config.regularization.l2 = 1e-5 learner_config.weak_learner_type = ( learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE) learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE learner_config.constraints.max_tree_depth = 5 learner_config.constraints.min_node_weight = 0 batch_size = 3 features = {} features["sparse_int"] = sparse_tensor.SparseTensor( array_ops.constant([[0, 0], [1, 0]], dtypes.int64), array_ops.constant([1, 2], dtypes.int64), array_ops.constant([3, 1], dtypes.int64)) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=5, features=features) labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32) weights = array_ops.ones([batch_size, 1], dtypes.float32) predictions_dict = gbdt_model.predict(learn.ModeKeys.TRAIN) predictions = predictions_dict["predictions"] # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( losses.per_example_maxent_loss( labels, weights, predictions, num_classes=learner_config.num_classes)[0]), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) # Grow 2 layers. train_op.run() train_op.run() output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output.ParseFromString(serialized.eval()) self.assertEqual(len(output.trees), 1) # We got 6 nodes: one parent and 4 leafs. self.assertEqual(len(output.trees[0].nodes), 6) self.assertAllClose(output.tree_weights, [1]) self.assertEqual(stamp_token.eval(), 2) print(output.trees[0]) # Leafs should have a dense vector of size 5. 
expected_leaf_1 = [-1.2497, -1.24976, 4.999, -1.24976, -1.2497] expected_leaf_2 = [-2.2362, -2.2362, 6.0028, -2.2362, -2.2362] expected_leaf_3 = [-2.2694, -2.2694, 4.0064, -0.0084, -2.2694] expected_leaf_4 = [-2.2694, -2.2694, -0.0084, 4.0064, -2.2694] self.assertArrayNear(expected_leaf_1, output.trees[0].nodes[2].leaf.vector.value, 1e-3) self.assertArrayNear(expected_leaf_2, output.trees[0].nodes[3].leaf.vector.value, 1e-3) self.assertArrayNear(expected_leaf_3, output.trees[0].nodes[4].leaf.vector.value, 1e-3) self.assertArrayNear(expected_leaf_4, output.trees[0].nodes[5].leaf.vector.value, 1e-3) def testTrainFnMulticlassTreePerClass(self): """Tests the GBDT train for multiclass tree per class strategy.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 1 # Use full hessian multiclass strategy. learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.TREE_PER_CLASS) learner_config.num_classes = 5 learner_config.regularization.l1 = 0 # To make matrix inversible. learner_config.regularization.l2 = 1e-5 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = { "dense_float": array_ops.constant([[1.0], [1.5], [2.0]], dtypes.float32), } gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=5, features=features) batch_size = 3 predictions = array_ops.constant( [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0], [0.0, 0.0, 0.0, 2.0, 1.2]], dtype=dtypes.float32) labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32) weights = array_ops.ones([batch_size, 1], dtypes.float32) partition_ids = array_ops.zeros([batch_size], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, # This should result in a tree built for a class 2. "num_trees": 13, } # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( losses.per_example_maxent_loss( labels, weights, predictions, num_classes=learner_config.num_classes)[0]), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEqual(len(output.trees), 0) self.assertEqual(len(output.tree_weights), 0) self.assertEqual(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) # On second run, expect a trivial split to be chosen to basically # predict the average. 
train_op.run() output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output.ParseFromString(serialized.eval()) self.assertEqual(len(output.trees), 1) self.assertAllClose(output.tree_weights, [1]) self.assertEqual(stamp_token.eval(), 2) # One node for a split, two children nodes. self.assertEqual(3, len(output.trees[0].nodes)) # Leafs will have a sparse vector for class 3. self.assertEqual(1, len(output.trees[0].nodes[1].leaf.sparse_vector.index)) self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0]) self.assertAlmostEqual( -1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0]) self.assertEqual(1, len(output.trees[0].nodes[2].leaf.sparse_vector.index)) self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0]) self.assertAllClose( 0.893284678459, output.trees[0].nodes[2].leaf.sparse_vector.value[0], atol=1e-4, rtol=1e-4) def testTrainFnChiefFeatureSelectionReachedLimitNoGoodSplit(self): """Tests the train function running on chief with feature selection.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.max_number_of_unique_feature_columns = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32) # Feature 1 is predictive but it won't be used because we have reached the # limit of num_used_handlers >= max_number_of_unique_feature_columns features["dense_float_1"] = array_ops.constant([0, 0, 1, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, "num_used_handlers": array_ops.constant(1, dtype=dtypes.int64), "used_handlers_mask": array_ops.constant([True, False], dtype=dtypes.bool), } labels = array_ops.constant([0, 0, 1, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. 
train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) # On second run, expect a trivial split to be chosen to basically # predict the average. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [0.1]) self.assertEquals(stamp_token.eval(), 2) expected_tree = """ nodes { dense_float_binary_split { feature_column: 0 threshold: 1.0 left_id: 1 right_id: 2 } node_metadata { gain: 0 } } nodes { leaf { vector { value: -0.25 } } } nodes { leaf { vector { value: 0.0 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) def testTrainFnChiefFeatureSelectionWithGoodSplits(self): """Tests the train function running on chief with feature selection.""" with self.cached_session() as sess: ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.max_number_of_unique_feature_columns = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32) # Feature 1 is predictive and is in our selected features so it will be # used even when we're at the limit. features["dense_float_1"] = array_ops.constant([0, 0, 1, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, "num_used_handlers": array_ops.constant(1, dtype=dtypes.int64), "used_handlers_mask": array_ops.constant([False, True], dtype=dtypes.bool), } labels = array_ops.constant([0, 0, 1, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. 
train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertAllClose(output.tree_weights, [0.1]) self.assertEquals(stamp_token.eval(), 2) expected_tree = """ nodes { dense_float_binary_split { feature_column: 1 left_id: 1 right_id: 2 } node_metadata { gain: 0.5 } } nodes { leaf { vector { value: 0.0 } } } nodes { leaf { vector { value: -0.5 } } }""" self.assertProtoEquals(expected_tree, output.trees[0]) def testTrainFnChiefFeatureSelectionReachedLimitIncrementAttemptedLayer(self): """Tests the train function running on chief with feature selection.""" with self.cached_session() as sess: tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() tree = tree_ensemble_config.trees.add() _set_float_split( tree.nodes.add().sparse_float_binary_split_default_right.split, 2, 4.0, 1, 2) _append_to_leaf(tree.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree.nodes.add().leaf, 1, 1.2) tree_ensemble_config.tree_weights.append(1.0) metadata = tree_ensemble_config.tree_metadata.add() metadata.is_finalized = False metadata.num_layers_grown = 1 tree_ensemble_config = tree_ensemble_config.SerializeToString() ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config, name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.max_number_of_unique_feature_columns = 1 learner_config.constraints.min_node_weight = 0 features = {} # Both features will be disabled since the feature selection limit is # already reached. features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32) features["dense_float_1"] = array_ops.constant([0, 0, 1, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = variables.VariableV1( initial_value=0, name="ensemble_stamp", trainable=False, dtype=dtypes.int64) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, # We have somehow reached our limit 1. Both of the handlers will be # disabled. "num_used_handlers": array_ops.constant(1, dtype=dtypes.int64), "used_handlers_mask": array_ops.constant([False, False], dtype=dtypes.bool), } labels = array_ops.constant([0, 0, 1, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) # Create train op. 
train_op = gbdt_model.train( loss=math_ops.reduce_mean( _squared_loss(labels, weights, predictions)), predictions_dict=predictions_dict, labels=labels) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect no splits to be chosen because the quantile # buckets will not be ready. train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertEquals(output.growing_metadata.num_layers_attempted, 1) self.assertEquals(stamp_token.eval(), 1) # Update the stamp to be able to run a second time. sess.run([ensemble_stamp.assign_add(1)]) train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) # Make sure the trees are not modified, but the num_layers_attempted is # incremented so that eventually the training stops. self.assertEquals(len(output.trees), 1) self.assertEquals(len(output.trees[0].nodes), 3) self.assertEquals(output.growing_metadata.num_layers_attempted, 2) def testResetModelBeforeAndAfterSplit(self): """Tests whether resetting works.""" with self.cached_session(): # First build a small tree and train it to verify training works. ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.constraints.max_tree_depth = 1 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, "max_tree_depth": 4, } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions)) # Create train op. 
update_op, reset_op, training_state = gbdt_model.update_stats( loss, predictions_dict) with ops.control_dependencies(update_op): train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble( predictions_dict, training_state) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() original_stamp = ensemble_stamp.eval() expected_tree = """ nodes { dense_float_binary_split { threshold: 1.0 left_id: 1 right_id: 2 } node_metadata { gain: 0 } } nodes { leaf { vector { value: 0.25 } } } nodes { leaf { vector { value: 0.0 } } }""" def _train_once_and_check(expect_split): stamp = ensemble_stamp.eval() train_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(stamp_token.eval(), stamp + 1) if expect_split: # State of the ensemble after a split occurs. self.assertEquals(len(output.trees), 1) self.assertProtoEquals(expected_tree, output.trees[0]) else: # State of the ensemble after a single accumulation but before any # splitting occurs self.assertEquals(len(output.trees), 0) self.assertProtoEquals(""" growing_metadata { num_trees_attempted: 1 num_layers_attempted: 1 }""", output) def _run_reset(): stamp_before_reset = ensemble_stamp.eval() reset_op.run() stamp_after_reset = ensemble_stamp.eval() self.assertNotEquals(stamp_after_reset, stamp_before_reset) _, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertProtoEquals("", output) return stamp_after_reset # Exit after one train_op, so no new layer are created but the handlers # contain enough information to split on the next call to train. _train_once_and_check(expect_split=False) self.assertEquals(ensemble_stamp.eval(), original_stamp + 1) # Reset the handlers so it still requires two training calls to split. stamp_after_reset = _run_reset() _train_once_and_check(expect_split=False) _train_once_and_check(expect_split=True) self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2) # This time, test that the reset_op works right after splitting. stamp_after_reset = _run_reset() # Test that after resetting, the tree can be trained as normal. _train_once_and_check(expect_split=False) _train_once_and_check(expect_split=True) self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2) def testResetModelNonChief(self): """Tests the reset function on a non-chief worker.""" with self.cached_session(): # Create ensemble with one bias node. 
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() text_format.Merge( """ trees { nodes { leaf { vector { value: 0.25 } } } } tree_weights: 1.0 tree_metadata { num_tree_weight_updates: 1 num_layers_grown: 1 is_finalized: false }""", ensemble_config) ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=ensemble_config.SerializeToString(), name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.constraints.max_tree_depth = 1 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=False, num_ps_replicas=0, center_bias=False, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions)) # Create reset op. _, reset_op, _ = gbdt_model.update_stats( loss, predictions_dict) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # Reset op doesn't do anything because this is a non-chief worker. reset_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 1) self.assertEquals(len(output.tree_weights), 1) self.assertEquals(stamp_token.eval(), 0) def testResetModelWithCenterBias(self): """Tests the reset function running on chief with bias centering.""" with self.cached_session(): ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config="", name="tree_ensemble") learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 learner_config.num_classes = 2 learner_config.regularization.l1 = 0 learner_config.regularization.l2 = 0 learner_config.constraints.max_tree_depth = 1 learner_config.constraints.min_node_weight = 0 features = {} features["dense_float"] = array_ops.ones([4, 1], dtypes.float32) gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel( is_chief=True, num_ps_replicas=0, center_bias=True, ensemble_handle=ensemble_handle, examples_per_layer=1, learner_config=learner_config, logits_dimension=1, features=features) predictions = array_ops.constant( [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32) partition_ids = array_ops.zeros([4], dtypes.int32) ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle) predictions_dict = { "predictions": predictions, "predictions_no_dropout": predictions, "partition_ids": partition_ids, "ensemble_stamp": ensemble_stamp, "num_trees": 12, } labels = array_ops.ones([4, 1], dtypes.float32) weights = array_ops.ones([4, 1], dtypes.float32) loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions)) # Create train op. 
update_op, reset_op, training_state = gbdt_model.update_stats( loss, predictions_dict) with ops.control_dependencies(update_op): train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble( predictions_dict, training_state) variables.global_variables_initializer().run() resources.initialize_resources(resources.shared_resources()).run() # On first run, expect bias to be centered. def train_and_check(): train_op.run() _, serialized = model_ops.tree_ensemble_serialize(ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) expected_tree = """ nodes { leaf { vector { value: 0.25 } } }""" self.assertEquals(len(output.trees), 1) self.assertAllEqual(output.tree_weights, [1.0]) self.assertProtoEquals(expected_tree, output.trees[0]) train_and_check() self.assertEquals(ensemble_stamp.eval(), 1) reset_op.run() stamp_token, serialized = model_ops.tree_ensemble_serialize( ensemble_handle) output = tree_config_pb2.DecisionTreeEnsembleConfig() output.ParseFromString(serialized.eval()) self.assertEquals(len(output.trees), 0) self.assertEquals(len(output.tree_weights), 0) self.assertEquals(stamp_token.eval(), 2) train_and_check() self.assertEquals(ensemble_stamp.eval(), 3) if __name__ == "__main__": googletest.main()
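The tests above repeatedly call a module-level helper _squared_loss(labels, weights, predictions) that is not shown in this excerpt. A minimal, hedged sketch of a helper with that call signature follows; it only illustrates the kind of loss the tests feed into math_ops.reduce_mean, and the helper actually defined in the test module may differ.

# Hedged sketch: a squared-error helper matching the call signature used by
# the tests above; not necessarily the implementation in the real test file.
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops


def _squared_loss(labels, unused_weights, predictions):
  """Per-example squared error between predictions and labels."""
  # Align the label shape with predictions ([batch, logits_dimension]).
  labels = array_ops.reshape(
      math_ops.cast(labels, predictions.dtype), array_ops.shape(predictions))
  return math_ops.reduce_sum(
      math_ops.squared_difference(predictions, labels), axis=1, keepdims=True)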
apache-2.0
PhilippeTillet/DSHF-ICA
python/examples/infomax_.py
2
14364
# Authors: Lukas Breuer <[email protected]> # Juergen Dammers <[email protected]> # Denis A. Engeman <[email protected]> # # License: BSD (3-clause) import math import logging import numpy as np logger = logging.getLogger('mne') # one selection here used across mne-python logger.propagate = False # don't propagate (in case of multiple imports) def random_permutation(n_samples, random_state=None): """Helper to emulate the randperm matlab function. It returns a vector containing a random permutation of the integers between 0 and n_samples-1. It returns the same random numbers as the randperm matlab function whenever the random_state is the same as the matlab's random seed. This function is useful for comparing against matlab scripts which use the randperm function. Note: the randperm(n_samples) matlab function generates a random sequence between 1 and n_samples, whereas random_permutation(n_samples, random_state) function generates a random sequence between 0 and n_samples-1, that is: randperm(n_samples) = random_permutation(n_samples, random_state) + 1 Parameters ---------- n_samples : int End point of the sequence to be permuted (excluded, i.e., the end point is equal to n_samples-1) random_state : int | None Random seed for initializing the pseudo-random number generator. Returns ------- randperm : ndarray, int Randomly permuted sequence between 0 and n-1. """ rng = check_random_state(random_state) idx = rng.rand(n_samples) randperm = np.argsort(idx) return randperm def check_random_state(seed): """Turn seed into a np.random.RandomState instance If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (int, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('%r cannot be used to seed a numpy.random.RandomState' ' instance' % seed) def infomax(raw_data, weights=None, l_rate=None, block=None, w_change=1e-12, anneal_deg=60., anneal_step=0.9, extended=True, n_subgauss=0, kurt_size=6000, ext_blocks=1, max_iter=200, random_state=None, blowup=1e4, blowup_fac=0.5, n_small_angle=20, use_bias=False, verbose=None): """Run (extended) Infomax ICA decomposition on raw data. Parameters ---------- raw_data : np.ndarray, shape (n_samples, n_features) The data to unmix; it is centered and sphered internally before unmixing. weights : np.ndarray, shape (n_features, n_features) The initialized unmixing matrix. Defaults to None, which means the identity matrix is used. l_rate : float This quantity indicates the relative size of the change in weights. .. note:: Smaller learning rates will slow down the ICA procedure. Defaults to 0.01 / log(n_features ** 2). block : int The block size of randomly chosen data segments. Defaults to floor(sqrt(n_times / 3.)). w_change : float The change at which to stop iteration. Defaults to 1e-12. anneal_deg : float The angle (in degrees) at which the learning rate will be reduced. Defaults to 60.0. anneal_step : float The factor by which the learning rate will be reduced once ``anneal_deg`` is exceeded: l_rate *= anneal_step Defaults to 0.9. extended : bool Whether to use the extended Infomax algorithm or not. Defaults to True. n_subgauss : int The number of subgaussian components. Only considered for extended Infomax. Defaults to 0. kurt_size : int The window size for kurtosis estimation. 
Only considered for extended Infomax. Defaults to 6000. ext_blocks : int Only considered for extended Infomax. If positive, denotes the number of blocks after which to recompute the kurtosis, which is used to estimate the signs of the sources. In this case, the number of sub-gaussian sources is automatically determined. If negative, the number of sub-gaussian sources to be used is fixed and equal to n_subgauss. In this case, the kurtosis is not estimated. Defaults to 1. max_iter : int The maximum number of iterations. Defaults to 200. random_state : int | np.random.RandomState If random_state is an int, use random_state to seed the random number generator. If random_state is already a np.random.RandomState instance, use random_state as random number generator. blowup : float The maximum difference allowed between two successive estimations of the unmixing matrix. Defaults to 10000. blowup_fac : float The factor by which the learning rate will be reduced if the difference between two successive estimations of the unmixing matrix exceeds ``blowup``: l_rate *= blowup_fac Defaults to 0.5. n_small_angle : int | None The maximum number of allowed steps in which the angle between two successive estimations of the unmixing matrix is less than ``anneal_deg``. If None, this parameter is not taken into account to stop the iterations. Defaults to 20. use_bias : bool This quantity indicates if the bias should be computed. Defaults to False. verbose : bool, str, int, or None If not None, override default verbosity level (see mne.verbose). Returns ------- unmixing_matrix : np.ndarray, shape (n_features, n_features) The linear unmixing operator. References ---------- [1] A. J. Bell, T. J. Sejnowski. An information-maximization approach to blind separation and blind deconvolution. Neural Computation, 7(6), 1129-1159, 1995. [2] T. W. Lee, M. Girolami, T. J. Sejnowski. Independent component analysis using an extended infomax algorithm for mixed subgaussian and supergaussian sources. Neural Computation, 11(2), 417-441, 1999. 
""" from scipy.stats import kurtosis from scipy.linalg import sqrtm rng = check_random_state(random_state) data = raw_data - np.mean(raw_data, 0, keepdims=True) sphere = 2*np.linalg.inv(sqrtm(np.cov(data.T))) data = np.dot(data, sphere) #print data[0,0], data[0,1], data[1,0] # define some default parameters max_weight = 1e8 restart_fac = 0.9 min_l_rate = 1e-10 degconst = 180.0 / np.pi # for extended Infomax extmomentum = 0.5 signsbias = 0.02 signcount_threshold = 25 signcount_step = 2 # check data shape n_samples, n_features = data.shape n_features_square = n_features ** 2 # check input parameters # heuristic default - may need adjustment for large or tiny data sets if l_rate is None: l_rate = 0.01 / math.log(n_features ** 2.0) if block is None: block = int(math.floor(math.sqrt(n_samples / 3.0))) logger.info('computing%sInfomax ICA' % ' Extended ' if extended else ' ') # collect parameters nblock = n_samples // block lastt = (nblock - 1) * block + 1 # initialize training if weights is None: weights = np.identity(n_features, dtype=np.float64) BI = block * np.identity(n_features, dtype=np.float64) bias = np.zeros((n_features, 1), dtype=np.float64) onesrow = np.ones((1, block), dtype=np.float64) startweights = weights.copy() oldweights = startweights.copy() step = 0 count_small_angle = 0 wts_blowup = False blockno = 0 signcount = 0 initial_ext_blocks = ext_blocks # save the initial value in case of reset # for extended Infomax if extended: signs = np.ones(n_features) for k in range(n_subgauss): signs[k] = -1 kurt_size = min(kurt_size, n_samples) old_kurt = np.zeros(n_features, dtype=np.float64) oldsigns = np.zeros(n_features) # trainings loop olddelta, oldchange = 1., 0. while step < max_iter: Z = np.dot(data, weights) # shuffle data at each step permute = random_permutation(n_samples, rng) # ICA training block # loop across block samples for t in range(0, lastt, block): u = np.dot(data[permute[t:t + block], :], weights) u += np.dot(bias, onesrow).T if extended: # extended ICA update y = np.tanh(u) weights += l_rate * np.dot(weights, BI - signs[None, :] * np.dot(u.T, y) - np.dot(u.T, u)) if use_bias: bias += l_rate * np.reshape(np.sum(y, axis=0, dtype=np.float64) * -2.0, (n_features, 1)) else: # logistic ICA weights update y = 1.0 / (1.0 + np.exp(-u)) weights += l_rate * np.dot(weights, BI + np.dot(u.T, (1.0 - 2.0 * y))) if use_bias: bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0, dtype=np.float64), (n_features, 1)) # check change limit max_weight_val = np.max(np.abs(weights)) if max_weight_val > max_weight: wts_blowup = True blockno += 1 if wts_blowup: break # ICA kurtosis estimation if extended: if ext_blocks > 0 and blockno % ext_blocks == 0: if kurt_size < n_samples: rp = np.floor(rng.uniform(0, 1, kurt_size) * (n_samples - 1)) tpartact = np.dot(data[rp.astype(int), :], weights).T else: tpartact = np.dot(data, weights).T # estimate kurtosis kurt = kurtosis(tpartact, axis=1, fisher=True) if extmomentum != 0: kurt = (extmomentum * old_kurt + (1.0 - extmomentum) * kurt) old_kurt = kurt # estimate weighted signs signs = np.sign(kurt + signsbias) ndiff = (signs - oldsigns != 0).sum() if ndiff == 0: signcount += 1 else: signcount = 0 oldsigns = signs if signcount >= signcount_threshold: ext_blocks = np.fix(ext_blocks * signcount_step) signcount = 0 # here we continue after the for loop over the ICA training blocks # if weights in bounds: if not wts_blowup: oldwtchange = weights - oldweights step += 1 angledelta = 0.0 delta = oldwtchange.reshape(1, n_features_square) change = 
np.sum(delta * delta, dtype=np.float64) if step > 2: angledelta = math.acos(np.sum(delta * olddelta) / math.sqrt(change * oldchange)) angledelta *= degconst if verbose: logger.info( 'step %d - lrate %5f, wchange %8.8f, angledelta %4.1f deg' % (step, l_rate, change, angledelta)) # anneal learning rate oldweights = weights.copy() if angledelta > anneal_deg: l_rate *= anneal_step # anneal learning rate # accumulate angledelta until anneal_deg reaches l_rate olddelta = delta oldchange = change count_small_angle = 0 # reset count when angledelta is large else: if step == 1: # on first step only olddelta = delta # initialize oldchange = change if n_small_angle is not None: count_small_angle += 1 if count_small_angle > n_small_angle: max_iter = step # apply stopping rule if step > 2 and change < w_change: step = max_iter elif change > blowup: l_rate *= blowup_fac # restart if weights blow up (for lowering l_rate) else: step = 0 # start again wts_blowup = 0 # re-initialize variables blockno = 1 l_rate *= restart_fac # with lower learning rate weights = startweights.copy() oldweights = startweights.copy() olddelta = np.zeros((1, n_features_square), dtype=np.float64) bias = np.zeros((n_features, 1), dtype=np.float64) ext_blocks = initial_ext_blocks # for extended Infomax if extended: signs = np.ones(n_features) for k in range(n_subgauss): signs[k] = -1 oldsigns = np.zeros(n_features) if l_rate > min_l_rate: if verbose: logger.info('... lowering learning rate to %g' '\n... re-starting...' % l_rate) else: raise ValueError('Error in Infomax ICA: unmixing_matrix matrix' 'might not be invertible!') if verbose: cost = -(np.linalg.slogdet(weights)[1] - np.sum(np.mean(2*np.log(1 + np.exp(Z)) - Z, 0))) print 'step {}: cost = {:.4f}'.format(step, cost) # prepare return values return np.dot(weights.T, sphere)
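A short usage sketch of the infomax function defined above, run on synthetic data. The two source signals and the mixing matrix are invented for illustration; the only facts taken from the code are the call signature and that the function centers and spheres the input internally and returns np.dot(weights.T, sphere) as the unmixing operator.

# Illustrative only: toy blind-source-separation run with the infomax() above.
import numpy as np

n_samples = 2000
t = np.linspace(0, 8 * np.pi, n_samples)

# Two independent sources: a sine wave and a square wave.
sources = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]
mixing = np.array([[1.0, 0.5],
                   [0.4, 1.2]])
observed = sources.dot(mixing.T)  # shape (n_samples, n_features)

unmixing = infomax(observed, extended=True, random_state=0)

# infomax() removes the mean and spheres internally, so the estimated sources
# (up to sign, scale and permutation) come from applying the returned operator
# to the centered observations.
centered = observed - observed.mean(axis=0)
estimated_sources = centered.dot(unmixing.T)
print(estimated_sources.shape)  # (2000, 2)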
mit
otype/myip
setup.py
1
8799
# -*- coding: utf-8 -*- from __future__ import print_function import os import sys import imp import subprocess ## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill! if 'check_output' not in dir(subprocess): def check_output(cmd_args, *args, **kwargs): proc = subprocess.Popen( cmd_args, *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) out, err = proc.communicate() if proc.returncode != 0: raise subprocess.CalledProcessError(proc.returncode, cmd_args) return out subprocess.check_output = check_output from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand try: import colorama colorama.init() # Initialize colorama on Windows except ImportError: # Don't require colorama just for running paver tasks. This allows us to # run `paver install' without requiring the user to first have colorama # installed. pass # Add the current directory to the module search path. sys.path.append('.') ## Constants CODE_DIRECTORY = 'myip' DOCS_DIRECTORY = 'docs' TESTS_DIRECTORY = 'tests' PYTEST_FLAGS = ['--doctest-modules'] # Import metadata. Normally this would just be: # # from myip import metadata # # However, when we do this, we also import `myip/__init__.py'. If this # imports names from some other modules and these modules have third-party # dependencies that need installing (which happens after this file is run), the # script will crash. What we do instead is to load the metadata module by path # instead, effectively side-stepping the dependency problem. Please make sure # metadata has no dependencies, otherwise they will need to be added to # the setup_requires keyword. metadata = imp.load_source( 'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py')) ## Miscellaneous helper functions def get_project_files(): """Retrieve a list of project files, ignoring hidden files. :return: sorted list of project files :rtype: :class:`list` """ if is_git_project(): return get_git_project_files() project_files = [] for top, subdirs, files in os.walk('.'): subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')] for f in files: if f.startswith('.'): continue project_files.append(os.path.join(top, f)) return project_files def is_git_project(): return os.path.isdir('.git') def get_git_project_files(): """Retrieve a list of all non-ignored files, including untracked files, excluding deleted files. :return: sorted list of git project files :rtype: :class:`list` """ cached_and_untracked_files = git_ls_files( '--cached', # All files cached in the index '--others', # Untracked files # Exclude untracked files that would be excluded by .gitignore, etc. '--exclude-standard') uncommitted_deleted_files = git_ls_files('--deleted') # Since sorting of files in a set is arbitrary, return a sorted list to # provide a well-defined order to tools like flake8, etc. return sorted(cached_and_untracked_files - uncommitted_deleted_files) def git_ls_files(*cmd_args): """Run ``git ls-files`` in the top-level project directory. Arguments go directly to execution call. :return: set of file names :rtype: :class:`set` """ cmd = ['git', 'ls-files'] cmd.extend(cmd_args) return set(subprocess.check_output(cmd).splitlines()) def print_success_message(message): """Print a message indicating success in green color to STDOUT. 
:param message: the message to print :type message: :class:`str` """ try: import colorama print(colorama.Fore.GREEN + message + colorama.Fore.RESET) except ImportError: print(message) def print_failure_message(message): """Print a message indicating failure in red color to STDERR. :param message: the message to print :type message: :class:`str` """ try: import colorama print(colorama.Fore.RED + message + colorama.Fore.RESET, file=sys.stderr) except ImportError: print(message, file=sys.stderr) def read(filename): """Return the contents of a file. :param filename: file path :type filename: :class:`str` :return: the file's content :rtype: :class:`str` """ with open(os.path.join(os.path.dirname(__file__), filename)) as f: return f.read() def _lint(): """Run lint and return an exit code.""" # Flake8 doesn't have an easy way to run checks using a Python function, so # just fork off another process to do it. # Python 3 compat: # - The result of subprocess call outputs are byte strings, meaning we need # to pass a byte string to endswith. project_python_files = [filename for filename in get_project_files() if filename.endswith(b'.py')] retcode = subprocess.call( ['flake8', '--max-complexity=10'] + project_python_files) if retcode == 0: print_success_message('No style errors') return retcode def _test(): """Run the unit tests. :return: exit code """ # Make sure to import pytest in this function. For the reason, see here: # <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8 import pytest # This runs the unit tests. # It also runs doctest, but only on the modules in TESTS_DIRECTORY. return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY]) def _test_all(): """Run lint and tests. :return: exit code """ return _lint() + _test() # The following code is to allow tests to be run with `python setup.py test'. # The main reason to make this possible is to allow tests to be run as part of # Setuptools' automatic run of 2to3 on the source code. The recommended way to # run tests is still `paver test_all'. # See <http://pythonhosted.org/setuptools/python3.html> # Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8 class TestAllCommand(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) # These are fake, and just set to appease distutils and setuptools. 
self.test_suite = True self.test_args = [] def run_tests(self): raise SystemExit(_test_all()) # define install_requires for specific Python versions python_version_specific_requires = [] # as of Python >= 2.7 and >= 3.2, the argparse module is maintained within # the Python standard library, otherwise we install it as a separate package if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3): python_version_specific_requires.append('argparse') # See here for more options: # <http://pythonhosted.org/setuptools/setuptools.html> setup_dict = dict( name=metadata.package, version=metadata.version, author=metadata.authors[0], author_email=metadata.emails[0], maintainer=metadata.authors[0], maintainer_email=metadata.emails[0], url=metadata.url, description=metadata.description, long_description=read('README.rst'), # Find a list of classifiers here: # <http://pypi.python.org/pypi?%3Aaction=list_classifiers> classifiers=[ 'Development Status :: 1 - Planning', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Documentation', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Installation/Setup', 'Topic :: System :: Software Distribution', ], packages=find_packages(exclude=(TESTS_DIRECTORY,)), install_requires=[ # your module dependencies ] + python_version_specific_requires, # Allow tests to be run with `python setup.py test'. tests_require=[ 'pytest==2.5.1', 'mock==1.0.1', 'flake8==2.1.0', ], cmdclass={'test': TestAllCommand}, zip_safe=False, # don't use eggs entry_points={ 'console_scripts': [ 'myip_cli = myip.main:entry_point' ], # if you have a gui, use this # 'gui_scripts': [ # 'myip_gui = myip.gui:entry_point' # ] } ) def main(): setup(**setup_dict) if __name__ == '__main__': main()
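The setup script above loads myip/metadata.py by path and reads a handful of attributes from it when building setup_dict. A hypothetical sketch of such a dependency-free metadata module follows; every value is a placeholder, and only the attribute names that setup.py actually reads (package, version, description, authors, emails, url) are taken from the code above.

# -*- coding: utf-8 -*-
# Hypothetical myip/metadata.py -- all values below are placeholders.
"""Project metadata, kept free of third-party imports so that setup.py can
load it by path before any dependencies are installed."""

package = 'myip'
version = '0.1.0'
description = 'Placeholder one-line description of the myip project.'
authors = ['Example Author']
emails = ['author@example.com']
url = 'https://example.invalid/myip'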
mit
pyokagan/gyp
buildbot/buildbot_run.py
10
8342
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Argument-less script to select what to run on the buildbots.""" import filecmp import os import shutil import subprocess import sys if sys.platform in ['win32', 'cygwin']: EXE_SUFFIX = '.exe' else: EXE_SUFFIX = '' BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__)) TRUNK_DIR = os.path.dirname(BUILDBOT_DIR) ROOT_DIR = os.path.dirname(TRUNK_DIR) ANDROID_DIR = os.path.join(ROOT_DIR, 'android') CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake') CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin') OUT_DIR = os.path.join(TRUNK_DIR, 'out') def CallSubProcess(*args, **kwargs): """Wrapper around subprocess.call which treats errors as build exceptions.""" with open(os.devnull) as devnull_fd: retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs) if retcode != 0: print '@@@STEP_EXCEPTION@@@' sys.exit(1) def PrepareCmake(): """Build CMake 2.8.8 since the version in Precise is 2.8.7.""" if os.environ['BUILDBOT_CLOBBER'] == '1': print '@@@BUILD_STEP Clobber CMake checkout@@@' shutil.rmtree(CMAKE_DIR) # We always build CMake 2.8.8, so no need to do anything # if the directory already exists. if os.path.isdir(CMAKE_DIR): return print '@@@BUILD_STEP Initialize CMake checkout@@@' os.mkdir(CMAKE_DIR) print '@@@BUILD_STEP Sync CMake@@@' CallSubProcess( ['git', 'clone', '--depth', '1', '--single-branch', '--branch', 'v2.8.8', '--', 'git://cmake.org/cmake.git', CMAKE_DIR], cwd=CMAKE_DIR) print '@@@BUILD_STEP Build CMake@@@' CallSubProcess( ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR], cwd=CMAKE_DIR) CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR) _ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng' def PrepareAndroidTree(): """Prepare an Android tree to run 'android' format tests.""" if os.environ['BUILDBOT_CLOBBER'] == '1': print '@@@BUILD_STEP Clobber Android checkout@@@' shutil.rmtree(ANDROID_DIR) # (Re)create the directory so that the following steps will succeed. if not os.path.isdir(ANDROID_DIR): os.mkdir(ANDROID_DIR) # We use a manifest from the gyp project listing pinned revisions of AOSP to # use, to ensure that we test against a stable target. This needs to be # updated to pick up new build system changes sometimes, so we must test if # it has changed. manifest_filename = 'aosp_manifest.xml' gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename) android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests', manifest_filename) manifest_is_current = (os.path.isfile(android_manifest) and filecmp.cmp(gyp_manifest, android_manifest)) if not manifest_is_current: # It's safe to repeat these steps, so just do them again to make sure we are # in a good state. print '@@@BUILD_STEP Initialize Android checkout@@@' CallSubProcess( ['repo', 'init', '-u', 'https://android.googlesource.com/platform/manifest', '-b', 'master', '-g', 'all,-notdefault,-device,-darwin,-mips,-x86'], cwd=ANDROID_DIR) shutil.copy(gyp_manifest, android_manifest) print '@@@BUILD_STEP Sync Android@@@' CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename], cwd=ANDROID_DIR) # If we already built the system image successfully and didn't sync to a new # version of the source, skip running the build again as it's expensive even # when there's nothing to do. 
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic', 'system.img') if manifest_is_current and os.path.isfile(system_img): return print '@@@BUILD_STEP Build Android@@@' CallSubProcess( ['/bin/bash', '-c', '%s && make -j4' % _ANDROID_SETUP], cwd=ANDROID_DIR) def StartAndroidEmulator(): """Start an android emulator from the built android tree.""" print '@@@BUILD_STEP Start Android emulator@@@' CallSubProcess(['/bin/bash', '-c', '%s && adb kill-server ' % _ANDROID_SETUP], cwd=ANDROID_DIR) # If taskset is available, use it to force adbd to run only on one core, as, # sadly, it improves its reliability (see crbug.com/268450). adbd_wrapper = '' with open(os.devnull, 'w') as devnull_fd: if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0: adbd_wrapper = 'taskset -c 0' CallSubProcess(['/bin/bash', '-c', '%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)], cwd=ANDROID_DIR) subprocess.Popen( ['/bin/bash', '-c', '%s && emulator -no-window' % _ANDROID_SETUP], cwd=ANDROID_DIR) CallSubProcess( ['/bin/bash', '-c', '%s && adb wait-for-device' % _ANDROID_SETUP], cwd=ANDROID_DIR) def StopAndroidEmulator(): """Stop all android emulators.""" print '@@@BUILD_STEP Stop Android emulator@@@' # If this fails, it's because there is no emulator running. subprocess.call(['pkill', 'emulator.*']) def GypTestFormat(title, format=None, msvs_version=None, tests=[]): """Run the gyp tests for a given format, emitting annotator tags. See annotator docs at: https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations Args: format: gyp format to test. Returns: 0 for sucesss, 1 for failure. """ if not format: format = title print '@@@BUILD_STEP ' + title + '@@@' sys.stdout.flush() env = os.environ.copy() if msvs_version: env['GYP_MSVS_VERSION'] = msvs_version command = ' '.join( [sys.executable, 'trunk/gyptest.py', '--all', '--passed', '--format', format, '--path', CMAKE_BIN_DIR, '--chdir', 'trunk'] + tests) if format == 'android': # gyptest needs the environment setup from envsetup/lunch in order to build # using the 'android' backend, so this is done in a single shell. retcode = subprocess.call( ['/bin/bash', '-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)], cwd=ANDROID_DIR, env=env) else: retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True) if retcode: # Emit failure tag, and keep going. print '@@@STEP_FAILURE@@@' return 1 return 0 def GypBuild(): # Dump out/ directory. print '@@@BUILD_STEP cleanup@@@' print 'Removing %s...' % OUT_DIR shutil.rmtree(OUT_DIR, ignore_errors=True) print 'Done.' retcode = 0 # The Android gyp bot runs on linux so this must be tested first. 
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android': PrepareAndroidTree() StartAndroidEmulator() try: retcode += GypTestFormat('android') finally: StopAndroidEmulator() elif sys.platform.startswith('linux'): retcode += GypTestFormat('ninja') retcode += GypTestFormat('make') PrepareCmake() retcode += GypTestFormat('cmake') elif sys.platform == 'darwin': retcode += GypTestFormat('ninja') retcode += GypTestFormat('xcode') retcode += GypTestFormat('make') elif sys.platform == 'win32': retcode += GypTestFormat('ninja') if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64': retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja', msvs_version='2013', tests=[ r'test\generator-output\gyptest-actions.py', r'test\generator-output\gyptest-relocate.py', r'test\generator-output\gyptest-rules.py']) retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013') else: raise Exception('Unknown platform') if retcode: # TODO(bradnelson): once the annotator supports a postscript (section for # after the build proper that could be used for cumulative failures), # use that instead of this. This isolates the final return value so # that it isn't misattributed to the last stage. print '@@@BUILD_STEP failures@@@' sys.exit(retcode) if __name__ == '__main__': GypBuild()
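The buildbot script above communicates with the annotator by printing @@@BUILD_STEP ...@@@, @@@STEP_FAILURE@@@ and @@@STEP_EXCEPTION@@@ tags to stdout. A small sketch of that pattern in isolation is shown below, mirroring how GypTestFormat reports a failing step while letting later steps continue; the step names and commands are invented for the example.

# Illustration of the annotator-tag pattern used above; only the @@@...@@@
# convention comes from the script, the steps themselves are made up.
import subprocess
import sys


def RunStep(title, command):
  """Run one build step, emitting a failure tag but continuing on error."""
  print '@@@BUILD_STEP %s@@@' % title
  sys.stdout.flush()
  retcode = subprocess.call(command, shell=True)
  if retcode:
    print '@@@STEP_FAILURE@@@'
    return 1
  return 0


failures = RunStep('lint', 'echo linting') + RunStep('tests', 'echo testing')
if failures:
  sys.exit(failures)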
bsd-3-clause
Dioptas/Dioptas
dioptas/model/MaskModel.py
1
11647
# -*- coding: utf-8 -*- # Dioptas - GUI program for fast processing of 2D X-ray diffraction data # Principal author: Clemens Prescher ([email protected]) # Copyright (C) 2014-2019 GSECARS, University of Chicago, USA # Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany # Copyright (C) 2019-2020 DESY, Hamburg, Germany # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from collections import deque import numpy as np import skimage.draw from PIL import Image from qtpy import QtCore from math import sqrt, atan2, cos, sin from .util.cosmics import cosmicsimage class MaskModel(object): def __init__(self, mask_dimension=(2048, 2048)): self.mask_dimension = mask_dimension self.reset_dimension() self.filename = '' self.mode = True self.roi = None self._mask_data = np.zeros(self.mask_dimension, dtype=bool) self._undo_deque = deque(maxlen=50) self._redo_deque = deque(maxlen=50) def set_dimension(self, mask_dimension): if not np.array_equal(mask_dimension, self.mask_dimension): self.mask_dimension = mask_dimension self.reset_dimension() def reset_dimension(self): if self.mask_dimension is not None: self._mask_data = np.zeros(self.mask_dimension, dtype=bool) self._undo_deque = deque(maxlen=50) self._redo_deque = deque(maxlen=50) @property def roi_mask(self): if self.roi is not None: roi_mask = np.ones(self.mask_dimension) x1, x2, y1, y2 = self.roi if x1 < 0: x1 = 0 if y1 < 0: y1 = 0 roi_mask[int(x1):int(x2), int(y1):int(y2)] = 0 return roi_mask else: return None def get_mask(self): if self.roi is None: return self._mask_data elif self.roi is not None: return np.logical_or(self._mask_data, self.roi_mask) def get_img(self): return self._mask_data def update_deque(self): """ Saves the current mask data into a deque, which can be popped later to provide an undo/redo feature. When performing a new action the old redo steps will be cleared..._ """ self._undo_deque.append(np.copy(self._mask_data)) self._redo_deque.clear() def undo(self): try: old_data = self._undo_deque.pop() self._redo_deque.append(np.copy(self._mask_data)) self._mask_data = old_data except IndexError: pass def redo(self): try: new_data = self._redo_deque.pop() self._undo_deque.append(np.copy(self._mask_data)) self._mask_data = new_data except IndexError: pass def mask_below_threshold(self, img_data, threshold): self.update_deque() self._mask_data += (img_data < threshold) def mask_above_threshold(self, img_data, threshold): self.update_deque() self._mask_data += (img_data > threshold) def mask_QGraphicsRectItem(self, QGraphicsRectItem): rect = QGraphicsRectItem.rect() self.mask_rect(rect.top(), rect.left(), rect.height(), rect.width()) def mask_QGraphicsPolygonItem(self, QGraphicsPolygonItem): """ Masks a polygon given by a QGraphicsPolygonItem from the QtWidgets Library. Uses the sklimage.draw.polygon function. 
""" # get polygon points poly_list = list(QGraphicsPolygonItem.vertices) x = np.zeros(len(poly_list)) y = np.zeros(len(poly_list)) for i, point in enumerate(poly_list): x[i] = point.x() y[i] = point.y() self.mask_polygon(x, y) def mask_QGraphicsEllipseItem(self, QGraphicsEllipseItem): """ Masks an Ellipse given by a QGraphicsEllipseItem from the QtWidgets Library. Uses the skimage.draw.ellipse function. """ bounding_rect = QGraphicsEllipseItem.rect() cx = bounding_rect.center().x() cy = bounding_rect.center().y() x_radius = bounding_rect.width() * 0.5 y_radius = bounding_rect.height() * 0.5 self.mask_ellipse(int(cx), int(cy), int(x_radius), int(y_radius)) def mask_rect(self, x, y, width, height): """ Masks a rectangle. x and y parameters are the upper left corner of the rectangle. """ self.update_deque() if width > 0: x_ind1 = np.round(x) x_ind2 = np.round(x + width) else: x_ind1 = np.round(x + width) x_ind2 = np.round(x) if height > 0: y_ind1 = np.round(y) y_ind2 = np.round(y + height) else: y_ind1 = np.round(y + height) y_ind2 = np.round(y) if x_ind1 < 0: x_ind1 = 0 if y_ind1 < 0: y_ind1 = 0 x_ind1, x_ind2, y_ind1, y_ind2 = int(x_ind1), int(x_ind2), int(y_ind1), int(y_ind2) self._mask_data[x_ind1:x_ind2, y_ind1:y_ind2] = self.mode def mask_polygon(self, x, y): """ Masks the a polygon with given vertices. x and y are lists of the polygon vertices. Uses the draw.polygon implementation of the skimage library. """ self.update_deque() rr, cc = skimage.draw.polygon(y, x, self._mask_data.shape) self._mask_data[rr, cc] = self.mode def mask_ellipse(self, cx, cy, x_radius, y_radius): """ Masks an ellipse with center coordinates (cx, cy) and the radii given. Uses the draw.ellipse implementation of the skimage library. """ self.update_deque() rr, cc = skimage.draw.ellipse( cy, cx, y_radius, x_radius, shape=self._mask_data.shape) self._mask_data[rr, cc] = self.mode def grow(self): self.update_deque() self._mask_data[1:, :] = np.logical_or(self._mask_data[1:, :], self._mask_data[:-1, :]) self._mask_data[:-1, :] = np.logical_or(self._mask_data[:-1, :], self._mask_data[1:, :]) self._mask_data[:, 1:] = np.logical_or(self._mask_data[:, 1:], self._mask_data[:, :-1]) self._mask_data[:, :-1] = np.logical_or(self._mask_data[:, :-1], self._mask_data[:, 1:]) def shrink(self): self.update_deque() self._mask_data[1:, :] = np.logical_and(self._mask_data[1:, :], self._mask_data[:-1, :]) self._mask_data[:-1, :] = np.logical_and(self._mask_data[:-1, :], self._mask_data[1:, :]) self._mask_data[:, 1:] = np.logical_and(self._mask_data[:, 1:], self._mask_data[:, :-1]) self._mask_data[:, :-1] = np.logical_and(self._mask_data[:, :-1], self._mask_data[:, 1:]) def invert_mask(self): self.update_deque() self._mask_data = np.logical_not(self._mask_data) def clear_mask(self): self.update_deque() self._mask_data[:, :] = False def remove_cosmic(self, img): self.update_deque() test = cosmicsimage(img, sigclip=3.0, objlim=3.0) num = 2 for i in range(num): test.lacosmiciteration(True) test.clean() self._mask_data = np.logical_or(self._mask_data, np.array(test.mask, dtype='bool')) def set_mode(self, mode): """ sets the mode to unmask or mask which equals mode = False or True """ self.mode = mode def set_mask(self, mask_data): self.update_deque() self._mask_data = mask_data def save_mask(self, filename): im_array = np.int8(self.get_img()) im = Image.fromarray(im_array) try: im.save(filename, "tiff", compression="tiff_deflate") except OSError: try: im.save(filename, "tiff", compression="tiff_adobe_deflate") except IOError: 
im.save(filename, "tiff") self.filename = filename def load_mask(self, filename): try: data = np.array(Image.open(filename)) except IOError: data = np.loadtxt(filename) if self.mask_dimension == data.shape: self.filename = filename self.mask_dimension = data.shape self.reset_dimension() self.set_mask(data) return True return False def add_mask(self, filename): try: data = np.array(Image.open(filename)) except IOError: data = np.loadtxt(filename) if self.get_mask().shape == data.shape: self._add_mask(data) return True return False def _add_mask(self, mask_data): self.update_deque() self._mask_data = np.logical_or(self._mask_data, np.array(mask_data, dtype='bool')) def find_center_of_circle_from_three_points(self, a, b, c): xa, ya = a.x(), a.y() xb, yb = b.x(), b.y() xc, yc = c.x(), c.y() # if (xa == xb and ya == yb) or (xa == xc and ya == yc) or (xb == xc and yb == yc): # return None mid_ab_x = (xa + xb) / 2.0 mid_ab_y = (ya + yb) / 2.0 mid_bc_x = (xb + xc) / 2.0 mid_bc_y = (yb + yc) / 2.0 slope_ab = (yb - ya) / (xb - xa) slope_bc = (yc - yb) / (xc - xb) slope_p_ab = -1.0 / slope_ab slope_p_bc = -1.0 / slope_bc b_p_ab = mid_ab_y - slope_p_ab * mid_ab_x b_p_bc = mid_bc_y - slope_p_bc * mid_bc_x x0 = (b_p_bc - b_p_ab) / (slope_p_ab - slope_p_bc) y0 = slope_p_ab * x0 + b_p_ab self.center_for_arc = QtCore.QPointF(x0, y0) return self.center_for_arc @staticmethod def find_radius_of_circle_from_center_and_point(p0, a): r = sqrt((a.x() - p0.x()) ** 2 + (a.y() - p0.y()) ** 2) return r def find_n_angles_on_arc_from_three_points_around_p0(self, p0, pa, pb, pc, n): phi_a = self.calc_angle_from_center_and_point(p0, pa) phi_b = self.calc_angle_from_center_and_point(p0, pb) phi_c = self.calc_angle_from_center_and_point(p0, pc) if phi_c < phi_a < phi_b or phi_b < phi_c < phi_a: phi_range = np.linspace(phi_a, phi_c + 2 * np.pi, n) elif phi_a < phi_b < phi_c or phi_c < phi_b < phi_a: phi_range = np.linspace(phi_a, phi_c, n) elif phi_a < phi_c < phi_b or phi_b < phi_a < phi_c: phi_range = np.linspace(phi_a + 2 * np.pi, phi_c, n) else: return None return phi_range @staticmethod def calc_angle_from_center_and_point(p0, pa): phi = atan2(pa.y() - p0.y(), pa.x() - p0.x()) return phi @staticmethod def calc_arc_points_from_angles(p0, r, width, phi_range): p = [] for phi in phi_range: xn = p0.x() + (r - width) * cos(phi) yn = p0.y() + (r - width) * sin(phi) p.append(QtCore.QPointF(xn, yn)) return p
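A brief usage sketch of the MaskModel class defined above: mask an ellipse, grow the mask by one pixel, read the combined mask, and undo the last step. It assumes the surrounding dioptas package (including the cosmics util imported at the top of the module) is importable; the dimensions and coordinates are arbitrary.

# Illustrative only: exercising a few MaskModel methods defined above.
from dioptas.model.MaskModel import MaskModel  # assumes dioptas is installed

mask_model = MaskModel(mask_dimension=(256, 256))

# Mask an ellipse centered at (120, 100) with radii of 30 and 20 pixels.
mask_model.mask_ellipse(120, 100, 30, 20)

# Dilate the masked region by one pixel in every direction.
mask_model.grow()

# get_mask() returns the boolean mask, combined with the ROI mask if one is set.
mask = mask_model.get_mask()
print(mask.sum())

# Every masking operation pushes the previous state onto the undo deque.
mask_model.undo()
print(mask_model.get_mask().sum())  # back to the state right after mask_ellipse()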
gpl-3.0
craigcitro/apitools
apitools/base/py/compression_test.py
8
5319
#!/usr/bin/env python # # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for compression.""" from apitools.base.py import compression from apitools.base.py import gzip import six import unittest2 class CompressionTest(unittest2.TestCase): def setUp(self): # Sample highly compressible data (~50MB). self.sample_data = b'abc' * 16777216 # Stream of the sample data. self.stream = six.BytesIO() self.stream.write(self.sample_data) self.length = self.stream.tell() self.stream.seek(0) def testCompressionExhausted(self): """Test full compression. Test that highly compressible data is actually compressed in entirety. """ output, read, exhausted = compression.CompressStream( self.stream, self.length, 9) # Ensure the compressed buffer is smaller than the input buffer. self.assertLess(output.length, self.length) # Ensure we read the entire input stream. self.assertEqual(read, self.length) # Ensure the input stream was exhausted. self.assertTrue(exhausted) def testCompressionUnbounded(self): """Test unbounded compression. Test that the input stream is exhausted when length is none. """ output, read, exhausted = compression.CompressStream( self.stream, None, 9) # Ensure the compressed buffer is smaller than the input buffer. self.assertLess(output.length, self.length) # Ensure we read the entire input stream. self.assertEqual(read, self.length) # Ensure the input stream was exhausted. self.assertTrue(exhausted) def testCompressionPartial(self): """Test partial compression. Test that the length parameter works correctly. The amount of data that's compressed can be greater than or equal to the requested length. """ output_length = 40 output, _, exhausted = compression.CompressStream( self.stream, output_length, 9) # Ensure the requested read size is <= the compressed buffer size. self.assertLessEqual(output_length, output.length) # Ensure the input stream was not exhausted. self.assertFalse(exhausted) def testCompressionIntegrity(self): """Test that compressed data can be decompressed.""" output, read, exhausted = compression.CompressStream( self.stream, self.length, 9) # Ensure uncompressed data matches the sample data. with gzip.GzipFile(fileobj=output) as f: original = f.read() self.assertEqual(original, self.sample_data) # Ensure we read the entire input stream. self.assertEqual(read, self.length) # Ensure the input stream was exhausted. self.assertTrue(exhausted) class StreamingBufferTest(unittest2.TestCase): def setUp(self): self.stream = compression.StreamingBuffer() def testSimpleStream(self): """Test simple stream operations. Test that the stream can be written to and read from. Also test that reading from the stream consumes the bytes. """ # Ensure the stream is empty. self.assertEqual(self.stream.length, 0) # Ensure data is correctly written. self.stream.write(b'Sample data') self.assertEqual(self.stream.length, 11) # Ensure data can be read and the read data is purged from the stream. 
data = self.stream.read(11) self.assertEqual(data, b'Sample data') self.assertEqual(self.stream.length, 0) def testPartialReads(self): """Test partial stream reads. Test that the stream can be read in chunks while preserving the consumption mechanics. """ self.stream.write(b'Sample data') # Ensure data can be read and the read data is purged from the stream. data = self.stream.read(6) self.assertEqual(data, b'Sample') self.assertEqual(self.stream.length, 5) # Ensure the remaining data can be read. data = self.stream.read(5) self.assertEqual(data, b' data') self.assertEqual(self.stream.length, 0) def testTooShort(self): """Test excessive stream reads. Test that more data can be requested from the stream than available without raising an exception. """ self.stream.write(b'Sample') # Ensure requesting more data than available does not raise an # exception. data = self.stream.read(100) self.assertEqual(data, b'Sample') self.assertEqual(self.stream.length, 0)
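The tests above pin down the observable behaviour of compression.StreamingBuffer: write() appends bytes, length reports how many bytes are still unread, and read(n) returns at most n bytes and removes them from the buffer. The sketch below implements just that contract; it is written from the assertions in the tests, not from the real apitools implementation, which may differ internally.

# Hedged sketch: a byte buffer with consuming reads that satisfies the
# behaviour asserted above; not apitools' actual StreamingBuffer.
import collections


class ConsumingBuffer(object):
    """FIFO byte buffer whose read() purges the bytes it returns."""

    def __init__(self):
        self.__buf = collections.deque()
        self.__size = 0

    @property
    def length(self):
        # Number of bytes written but not yet read.
        return self.__size

    def write(self, data):
        if data:
            self.__buf.append(data)
            self.__size += len(data)

    def read(self, size=None):
        """Return up to `size` bytes, removing them from the buffer."""
        data = b''
        while self.__buf and (size is None or len(data) < size):
            chunk = self.__buf.popleft()
            if size is not None and len(data) + len(chunk) > size:
                take = size - len(data)
                data += chunk[:take]
                self.__buf.appendleft(chunk[take:])
            else:
                data += chunk
        self.__size -= len(data)
        return data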
apache-2.0
seslattery/django-sample-app
bin/fabfile.py
1
3028
from fabric.api import cd, run, env, local, sudo, require from fabric.operations import _prefix_commands, _prefix_env_vars from lib.fabric_helpers import * import os import string env.hosts = ['djtut2.example.com'] env.code_dir = '/srv/www/djtut2' env.virtualenv = '/srv/www/djtut2/.virtualenv' env.code_repo = '[email protected]:user/djtut2.git' env.django_settings_module = 'djtut2.settings' def run_tests(): """ Runs the Django test suite as is. """ local("./manage.py test") def deploy_static(): with cd(env.code_dir): run('./manage.py collectstatic -v0 --noinput') def uname(): """ Prints information about the host. """ run("uname -a") def push(): """ Push new code and pull on all hosts """ local('git push origin master') with cd(env.code_dir): run('git pull origin master') def update_requirements(): """ Update requirements in the virtualenv. """ run("%s/bin/pip install -r %s/requirements/prod.txt" % (env.virtualenv, env.code_dir)) def migrate(app=None): """ Run the migrate task Usage: fab migrate:app_name """ if app: run("source %s/bin/activate; django-admin.py migrate %s --settings=%s" % (env.virtualenv, app, env.django_settings_module)) else: run("source %s/bin/activate; django-admin.py migrate --settings=%s" % (env.virtualenv, env.django_settings_module)) def version(): """ Show last commit to the deployed repo. """ with cd(env.code_dir): run('git log -1') def restart(): """ Restart the wsgi process """ with cd(env.code_dir): run("touch %s/djtut2/wsgi.py" % env.code_dir) def ve_run(cmd): """ Helper function. Runs a command using the virtualenv environment """ require('root') return sshagent_run('source %s/bin/activate; %s' % (env.virtualenv, cmd)) def sshagent_run(cmd): """ Helper function. Runs a command with SSH agent forwarding enabled. Note:: Fabric (and paramiko) can't forward your SSH agent. This helper uses your system's ssh to do so. """ # Handle context manager modifications wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote') try: host, port = env.host_string.split(':') return local( "ssh -p %s -A %s@%s '%s'" % (port, env.user, host, wrapped_cmd) ) except ValueError: return local( "ssh -A %s@%s '%s'" % (env.user, env.host_string, wrapped_cmd) ) def deploy(): """ Update the remote deployment, update the virtualenv, perform any pending migrations, then restart the wsgi process """ push() update_requirements() migrate() restart() def clone(): """ Clone the repository for the first time """ with cd(env.code_dir): run('git clone %s .' % (env.code_repo)) def bootstrap(): """ Bootstrap the initial deploy environment, then deploy """ run('mkdir %s' % (env.code_dir)) run('virtualenv %s' % (env.virtualenv)) clone() deploy()
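One more task in the same style as those above could install development requirements; the sketch below follows the update_requirements pattern, but requirements/dev.txt is an assumption and does not appear in the original fabfile.

def update_dev_requirements():
    """
    Update development requirements in the virtualenv
    (hypothetical: assumes a requirements/dev.txt exists).
    """
    run("%s/bin/pip install -r %s/requirements/dev.txt" %
        (env.virtualenv, env.code_dir))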
bsd-3-clause
Ademan/NumPy-GSoC
numpy/core/tests/test_umath.py
3
38130
import sys from numpy.testing import * import numpy.core.umath as ncu import numpy as np class TestDivision(TestCase): def test_division_int(self): # int division should follow Python x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) if 5 / 10 == 0.5: assert_equal(x / 100, [0.05, 0.1, 0.9, 1, -0.05, -0.1, -0.9, -1, -1.2]) else: assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) def test_division_complex(self): # check that implementation is correct msg = "Complex division implementation check" x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) assert_almost_equal(x**2/x, x, err_msg=msg) # check overflow, underflow msg = "Complex division overflow/underflow check" x = np.array([1.e+110, 1.e-110], dtype=np.complex128) y = x**2/x assert_almost_equal(y/x, [1, 1], err_msg=msg) def test_floor_division_complex(self): # check that implementation is correct msg = "Complex floor division implementation check" x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) y = np.array([0., -1., 0., 0.], dtype=np.complex128) assert_equal(np.floor_divide(x**2,x), y, err_msg=msg) # check overflow, underflow msg = "Complex floor division overflow/underflow check" x = np.array([1.e+110, 1.e-110], dtype=np.complex128) y = np.floor_divide(x**2, x) assert_equal(y, [1.e+110, 0], err_msg=msg) class TestPower(TestCase): def test_power_float(self): x = np.array([1., 2., 3.]) assert_equal(x**0, [1., 1., 1.]) assert_equal(x**1, x) assert_equal(x**2, [1., 4., 9.]) y = x.copy() y **= 2 assert_equal(y, [1., 4., 9.]) assert_almost_equal(x**(-1), [1., 0.5, 1./3]) assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) def test_power_complex(self): x = np.array([1+2j, 2+3j, 3+4j]) assert_equal(x**0, [1., 1., 1.]) assert_equal(x**1, x) assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, (-117-44j)/15625]) assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), ncu.sqrt(3+4j)]) norm = 1./((x**14)[0]) assert_almost_equal(x**14 * norm, [i * norm for i in [-76443+16124j, 23161315+58317492j, 5583548873 + 2465133864j]]) # Ticket #836 def assert_complex_equal(x, y): assert_array_equal(x.real, y.real) assert_array_equal(x.imag, y.imag) for z in [complex(0, np.inf), complex(1, np.inf)]: err = np.seterr(invalid="ignore") z = np.array([z], dtype=np.complex_) try: assert_complex_equal(z**1, z) assert_complex_equal(z**2, z*z) assert_complex_equal(z**3, z*z*z) finally: np.seterr(**err) class TestLog2(TestCase): def test_log2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for dt in ['f','d','g'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.log2(xf), yf) class TestExp2(TestCase): def test_exp2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for dt in ['f','d','g'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.exp2(yf), xf) class TestLogAddExp2(object): # Need test for intermediate precisions def test_logaddexp2_values(self) : x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 
6] for dt, dec in zip(['f','d','g'],[6, 15, 15]) : xf = np.log2(np.array(x, dtype=dt)) yf = np.log2(np.array(y, dtype=dt)) zf = np.log2(np.array(z, dtype=dt)) assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) def test_logaddexp2_range(self) : x = [1000000, -1000000, 1000200, -1000200] y = [1000200, -1000200, 1000000, -1000000] z = [1000200, -1000000, 1000200, -1000000] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) def test_inf(self) : err = np.seterr(invalid='ignore') inf = np.inf x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] z = [inf, inf, inf, -inf, inf, inf, 1, 1] try: for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) assert_equal(np.logaddexp2(logxf, logyf), logzf) finally: np.seterr(**err) def test_nan(self): assert np.isnan(np.logaddexp2(np.nan, np.inf)) assert np.isnan(np.logaddexp2(np.inf, np.nan)) assert np.isnan(np.logaddexp2(np.nan, 0)) assert np.isnan(np.logaddexp2(0, np.nan)) assert np.isnan(np.logaddexp2(np.nan, np.nan)) class TestLog(TestCase): def test_log_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for dt in ['f','d','g'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ assert_almost_equal(np.log(xf), yf) class TestExp(TestCase): def test_exp_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for dt in ['f','d','g'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ assert_almost_equal(np.exp(yf), xf) class TestLogAddExp(object): def test_logaddexp_values(self) : x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] for dt, dec in zip(['f','d','g'],[6, 15, 15]) : xf = np.log(np.array(x, dtype=dt)) yf = np.log(np.array(y, dtype=dt)) zf = np.log(np.array(z, dtype=dt)) assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) def test_logaddexp_range(self) : x = [1000000, -1000000, 1000200, -1000200] y = [1000200, -1000200, 1000000, -1000000] z = [1000200, -1000000, 1000200, -1000000] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) assert_almost_equal(np.logaddexp(logxf, logyf), logzf) def test_inf(self) : err = np.seterr(invalid='ignore') inf = np.inf x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] z = [inf, inf, inf, -inf, inf, inf, 1, 1] try: for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) assert_equal(np.logaddexp(logxf, logyf), logzf) finally: np.seterr(**err) def test_nan(self): assert np.isnan(np.logaddexp(np.nan, np.inf)) assert np.isnan(np.logaddexp(np.inf, np.nan)) assert np.isnan(np.logaddexp(np.nan, 0)) assert np.isnan(np.logaddexp(0, np.nan)) assert np.isnan(np.logaddexp(np.nan, np.nan)) class TestLog1p(TestCase): def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) class TestExpm1(TestCase): def test_expm1(self): assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) class TestHypot(TestCase, object): def test_simple(self): assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) assert_almost_equal(ncu.hypot(0, 0), 0) def 
assert_hypot_isnan(x, y): err = np.seterr(invalid='ignore') try: assert np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)) finally: np.seterr(**err) def assert_hypot_isinf(x, y): err = np.seterr(invalid='ignore') try: assert np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)) finally: np.seterr(**err) class TestHypotSpecialValues(TestCase): def test_nan_outputs(self): assert_hypot_isnan(np.nan, np.nan) assert_hypot_isnan(np.nan, 1) def test_nan_outputs(self): assert_hypot_isinf(np.nan, np.inf) assert_hypot_isinf(np.inf, np.nan) assert_hypot_isinf(np.inf, 0) assert_hypot_isinf(0, np.inf) def assert_arctan2_isnan(x, y): assert np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)) def assert_arctan2_ispinf(x, y): assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)) def assert_arctan2_isninf(x, y): assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)) def assert_arctan2_ispzero(x, y): assert (ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)) def assert_arctan2_isnzero(x, y): assert (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)) class TestArctan2SpecialValues(TestCase): def test_one_one(self): # atan2(1, 1) returns pi/4. assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) def test_zero_nzero(self): # atan2(+-0, -0) returns +-pi. assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) def test_zero_pzero(self): # atan2(+-0, +0) returns +-0. assert_arctan2_ispzero(np.PZERO, np.PZERO) assert_arctan2_isnzero(np.NZERO, np.PZERO) def test_zero_negative(self): # atan2(+-0, x) returns +-pi for x < 0. assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) def test_zero_positive(self): # atan2(+-0, x) returns +-0 for x > 0. assert_arctan2_ispzero(np.PZERO, 1) assert_arctan2_isnzero(np.NZERO, 1) def test_positive_zero(self): # atan2(y, +-0) returns +pi/2 for y > 0. assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) def test_negative_zero(self): # atan2(y, +-0) returns -pi/2 for y < 0. assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) def test_any_ninf(self): # atan2(+-y, -infinity) returns +-pi for finite y > 0. assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) def test_any_pinf(self): # atan2(+-y, +infinity) returns +-0 for finite y > 0. assert_arctan2_ispzero(1, np.inf) assert_arctan2_isnzero(-1, np.inf) def test_inf_any(self): # atan2(+-infinity, x) returns +-pi/2 for finite x. assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) def test_inf_ninf(self): # atan2(+-infinity, -infinity) returns +-3*pi/4. assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) def test_inf_pinf(self): # atan2(+-infinity, +infinity) returns +-pi/4. 
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) def test_nan_any(self): # atan2(nan, x) returns nan for any x, including inf assert_arctan2_isnan(np.nan, np.inf) assert_arctan2_isnan(np.inf, np.nan) assert_arctan2_isnan(np.nan, np.nan) class TestLdexp(TestCase): def test_ldexp(self): assert_almost_equal(ncu.ldexp(2., 3), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int16)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int32)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int16)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int32)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), np.array(3, np.int16)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), np.array(3, np.int32)), 16.) class TestMaximum(TestCase): def test_reduce_complex(self): assert_equal(np.maximum.reduce([1,2j]),1) assert_equal(np.maximum.reduce([1+3j,2j]),1+3j) def test_float_nans(self): nan = np.nan arg1 = np.array([0, nan, nan]) arg2 = np.array([nan, 0, nan]) out = np.array([nan, nan, nan]) assert_equal(np.maximum(arg1, arg2), out) def test_complex_nans(self): nan = np.nan for cnan in [nan, nan*1j, nan + nan*1j] : arg1 = np.array([0, cnan, cnan], dtype=np.complex) arg2 = np.array([cnan, 0, cnan], dtype=np.complex) out = np.array([nan, nan, nan], dtype=np.complex) assert_equal(np.maximum(arg1, arg2), out) class TestMinimum(TestCase): def test_reduce_complex(self): assert_equal(np.minimum.reduce([1,2j]),2j) assert_equal(np.minimum.reduce([1+3j,2j]),2j) def test_float_nans(self): nan = np.nan arg1 = np.array([0, nan, nan]) arg2 = np.array([nan, 0, nan]) out = np.array([nan, nan, nan]) assert_equal(np.minimum(arg1, arg2), out) def test_complex_nans(self): nan = np.nan for cnan in [nan, nan*1j, nan + nan*1j] : arg1 = np.array([0, cnan, cnan], dtype=np.complex) arg2 = np.array([cnan, 0, cnan], dtype=np.complex) out = np.array([nan, nan, nan], dtype=np.complex) assert_equal(np.minimum(arg1, arg2), out) class TestFmax(TestCase): def test_reduce_complex(self): assert_equal(np.fmax.reduce([1,2j]),1) assert_equal(np.fmax.reduce([1+3j,2j]),1+3j) def test_float_nans(self): nan = np.nan arg1 = np.array([0, nan, nan]) arg2 = np.array([nan, 0, nan]) out = np.array([0, 0, nan]) assert_equal(np.fmax(arg1, arg2), out) def test_complex_nans(self): nan = np.nan for cnan in [nan, nan*1j, nan + nan*1j] : arg1 = np.array([0, cnan, cnan], dtype=np.complex) arg2 = np.array([cnan, 0, cnan], dtype=np.complex) out = np.array([0, 0, nan], dtype=np.complex) assert_equal(np.fmax(arg1, arg2), out) class TestFmin(TestCase): def test_reduce_complex(self): assert_equal(np.fmin.reduce([1,2j]),2j) assert_equal(np.fmin.reduce([1+3j,2j]),2j) def test_float_nans(self): nan = np.nan arg1 = np.array([0, nan, nan]) arg2 = np.array([nan, 0, nan]) out = np.array([0, 0, nan]) assert_equal(np.fmin(arg1, arg2), out) def test_complex_nans(self): nan = np.nan for cnan in [nan, nan*1j, nan + nan*1j] : arg1 = np.array([0, cnan, cnan], dtype=np.complex) arg2 = np.array([cnan, 0, cnan], dtype=np.complex) out = np.array([0, 0, nan], dtype=np.complex) assert_equal(np.fmin(arg1, arg2), out) class TestFloatingPoint(TestCase): def test_floating_point(self): assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) class TestDegrees(TestCase): def test_degrees(self): assert_almost_equal(ncu.degrees(np.pi), 180.0) assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) 
class TestRadians(TestCase): def test_radians(self): assert_almost_equal(ncu.radians(180.0), np.pi) assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) class TestSign(TestCase): def test_sign(self): a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) out = np.zeros(a.shape) tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) olderr = np.seterr(invalid='ignore') try: res = ncu.sign(a) assert_equal(res, tgt) res = ncu.sign(a, out) assert_equal(res, tgt) assert_equal(out, tgt) finally: np.seterr(**olderr) class TestSpecialMethods(TestCase): def test_wrap(self): class with_wrap(object): def __array__(self): return np.zeros(1) def __array_wrap__(self, arr, context): r = with_wrap() r.arr = arr r.context = context return r a = with_wrap() x = ncu.minimum(a, a) assert_equal(x.arr, np.zeros(1)) func, args, i = x.context self.assertTrue(func is ncu.minimum) self.assertEqual(len(args), 2) assert_equal(args[0], a) assert_equal(args[1], a) self.assertEqual(i, 0) def test_wrap_with_iterable(self): # test fix for bug #1026: class with_wrap(np.ndarray): __array_priority__ = 10 def __new__(cls): return np.asarray(1).view(cls).copy() def __array_wrap__(self, arr, context): return arr.view(type(self)) a = with_wrap() x = ncu.multiply(a, (1, 2, 3)) self.assertTrue(isinstance(x, with_wrap)) assert_array_equal(x, np.array((1, 2, 3))) def test_priority_with_scalar(self): # test fix for bug #826: class A(np.ndarray): __array_priority__ = 10 def __new__(cls): return np.asarray(1.0, 'float64').view(cls).copy() a = A() x = np.float64(1)*a self.assertTrue(isinstance(x, A)) assert_array_equal(x, np.array(1)) def test_old_wrap(self): class with_wrap(object): def __array__(self): return np.zeros(1) def __array_wrap__(self, arr): r = with_wrap() r.arr = arr return r a = with_wrap() x = ncu.minimum(a, a) assert_equal(x.arr, np.zeros(1)) def test_priority(self): class A(object): def __array__(self): return np.zeros(1) def __array_wrap__(self, arr, context): r = type(self)() r.arr = arr r.context = context return r class B(A): __array_priority__ = 20. class C(A): __array_priority__ = 40. 
x = np.zeros(1) a = A() b = B() c = C() f = ncu.minimum self.assertTrue(type(f(x,x)) is np.ndarray) self.assertTrue(type(f(x,a)) is A) self.assertTrue(type(f(x,b)) is B) self.assertTrue(type(f(x,c)) is C) self.assertTrue(type(f(a,x)) is A) self.assertTrue(type(f(b,x)) is B) self.assertTrue(type(f(c,x)) is C) self.assertTrue(type(f(a,a)) is A) self.assertTrue(type(f(a,b)) is B) self.assertTrue(type(f(b,a)) is B) self.assertTrue(type(f(b,b)) is B) self.assertTrue(type(f(b,c)) is C) self.assertTrue(type(f(c,b)) is C) self.assertTrue(type(f(c,c)) is C) self.assertTrue(type(ncu.exp(a) is A)) self.assertTrue(type(ncu.exp(b) is B)) self.assertTrue(type(ncu.exp(c) is C)) def test_failing_wrap(self): class A(object): def __array__(self): return np.zeros(1) def __array_wrap__(self, arr, context): raise RuntimeError a = A() self.assertRaises(RuntimeError, ncu.maximum, a, a) def test_default_prepare(self): class with_wrap(object): __array_priority__ = 10 def __array__(self): return np.zeros(1) def __array_wrap__(self, arr, context): return arr a = with_wrap() x = ncu.minimum(a, a) assert_equal(x, np.zeros(1)) assert_equal(type(x), np.ndarray) def test_prepare(self): class with_prepare(np.ndarray): __array_priority__ = 10 def __array_prepare__(self, arr, context): # make sure we can return a new return np.array(arr).view(type=with_prepare) a = np.array(1).view(type=with_prepare) x = np.add(a, a) assert_equal(x, np.array(2)) assert_equal(type(x), with_prepare) def test_failing_prepare(self): class A(object): def __array__(self): return np.zeros(1) def __array_prepare__(self, arr, context=None): raise RuntimeError a = A() self.assertRaises(RuntimeError, ncu.maximum, a, a) def test_array_with_context(self): class A(object): def __array__(self, dtype=None, context=None): func, args, i = context self.func = func self.args = args self.i = i return np.zeros(1) class B(object): def __array__(self, dtype=None): return np.zeros(1, dtype) class C(object): def __array__(self): return np.zeros(1) a = A() ncu.maximum(np.zeros(1), a) self.assertTrue(a.func is ncu.maximum) assert_equal(a.args[0], 0) self.assertTrue(a.args[1] is a) self.assertTrue(a.i == 1) assert_equal(ncu.maximum(a, B()), 0) assert_equal(ncu.maximum(a, C()), 0) class TestChoose(TestCase): def test_mixed(self): c = np.array([True,True]) a = np.array([True,True]) assert_equal(np.choose(c, (a, 1)), np.array([1,1])) def is_longdouble_finfo_bogus(): info = np.finfo(np.longcomplex) return not np.isfinite(np.log10(info.tiny/info.eps)) class TestComplexFunctions(object): funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, np.arctanh, np.sin, np.cos, np.tan, np.exp, np.exp2, np.log, np.sqrt, np.log10, np.log2, np.log1p] def test_it(self): for f in self.funcs: if f is np.arccosh : x = 1.5 else : x = .5 fr = f(x) fz = f(np.complex(x)) assert_almost_equal(fz.real, fr, err_msg='real part %s'%f) assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f) def test_precisions_consistent(self) : z = 1 + 1j for f in self.funcs : fcf = f(np.csingle(z)) fcd = f(np.cdouble(z)) fcl = f(np.clongdouble(z)) assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f) assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f) def test_branch_cuts(self): # check branch cuts and continuity on them yield _check_branch_cut, np.log, -0.5, 1j, 1, -1 yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1 yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1 yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1 yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1 yield 
_check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1 yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1 yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1 yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1 yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1 yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1 # check against bogus branch cuts: assert continuity between quadrants yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1 yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1 yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1 yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1 yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1 yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1 @dec.knownfailureif(True, "These branch cuts are known to fail") def test_branch_cuts_failing(self): # XXX: signed zero not OK with ICC on 64-bit platform for log, see # http://permalink.gmane.org/gmane.comp.python.numeric.general/25335 yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True # XXX: signed zeros are not OK for sqrt or for the arc* functions yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True def test_against_cmath(self): import cmath, sys # cmath.asinh is broken in some versions of Python, see # http://bugs.python.org/issue1381 broken_cmath_asinh = False if sys.version_info < (2,6): broken_cmath_asinh = True points = [-1-1j, -1+1j, +1-1j, +1+1j] name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} atol = 4*np.finfo(np.complex).eps for func in self.funcs: fname = func.__name__.split('.')[-1] cname = name_map.get(fname, fname) try: cfunc = getattr(cmath, cname) except AttributeError: continue for p in points: a = complex(func(np.complex_(p))) b = cfunc(p) if cname == 'asinh' and broken_cmath_asinh: continue assert abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b) def check_loss_of_precision(self, dtype): """Check loss of precision in complex arc* functions""" # Check against known-good functions info = np.finfo(dtype) real_dtype = dtype(0.).real.dtype eps = info.eps def check(x, rtol): x = x.astype(real_dtype) z = x.astype(dtype) d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsinh') z = (1j*x).astype(dtype) d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsin') z = x.astype(dtype) d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arctanh') z = (1j*x).astype(dtype) d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arctan') # The switchover was chosen as 1e-3; hence there can be up to # ~eps/1e-3 of relative 
cancellation error before it x_series = np.logspace(-20, -3.001, 200) x_basic = np.logspace(-2.999, 0, 10, endpoint=False) if dtype is np.longcomplex: # It's not guaranteed that the system-provided arc functions # are accurate down to a few epsilons. (Eg. on Linux 64-bit) # So, give more leeway for long complex tests here: check(x_series, 50*eps) else: check(x_series, 2*eps) check(x_basic, 2*eps/1e-3) # Check a few points z = np.array([1e-5*(1+1j)], dtype=dtype) p = 9.999999999333333333e-6 + 1.000000000066666666e-5j d = np.absolute(1-np.arctanh(z)/p) assert np.all(d < 1e-15) p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j d = np.absolute(1-np.arcsinh(z)/p) assert np.all(d < 1e-15) p = 9.999999999333333333e-6j + 1.000000000066666666e-5 d = np.absolute(1-np.arctan(z)/p) assert np.all(d < 1e-15) p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 d = np.absolute(1-np.arcsin(z)/p) assert np.all(d < 1e-15) # Check continuity across switchover points def check(func, z0, d=1): z0 = np.asarray(z0, dtype=dtype) zp = z0 + abs(z0) * d * eps * 2 zm = z0 - abs(z0) * d * eps * 2 assert np.all(zp != zm), (zp, zm) # NB: the cancellation error at the switchover is at least eps good = (abs(func(zp) - func(zm)) < 2*eps) assert np.all(good), (func, z0[~good]) for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan): pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3) if rp != 0 or ip != 0] check(func, pts, 1) check(func, pts, 1j) check(func, pts, 1+1j) def test_loss_of_precision(self): for dtype in [np.complex64, np.complex_]: yield self.check_loss_of_precision, dtype @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo") def test_loss_of_precision_longcomplex(self): self.check_loss_of_precision(np.longcomplex) class TestAttributes(TestCase): def test_attributes(self): add = ncu.add assert_equal(add.__name__, 'add') assert add.__doc__.startswith('add(x1, x2[, out])\n\n') self.assertTrue(add.ntypes >= 18) # don't fail if types added self.assertTrue('ii->i' in add.types) assert_equal(add.nin, 2) assert_equal(add.nout, 1) assert_equal(add.identity, 0) class TestSubclass(TestCase): def test_subclass_op(self): class simple(np.ndarray): def __new__(subtype, shape): self = np.ndarray.__new__(subtype, shape, dtype=object) self.fill(0) return self a = simple((3,4)) assert_equal(a+a, a) def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, dtype=np.complex): """ Check for a branch cut in a function. Assert that `x0` lies on a branch cut of function `f` and `f` is continuous from the direction `dx`. 
Parameters ---------- f : func Function to check x0 : array-like Point on branch cut dx : array-like Direction to check continuity in re_sign, im_sign : {1, -1} Change of sign of the real or imaginary part expected sig_zero_ok : bool Whether to check if the branch cut respects signed zero (if applicable) dtype : dtype Dtype to check (should be complex) """ x0 = np.atleast_1d(x0).astype(dtype) dx = np.atleast_1d(dx).astype(dtype) scale = np.finfo(dtype).eps * 1e3 atol = 1e-4 y0 = f(x0) yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) assert np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp) assert np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp) assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym) assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym) if sig_zero_ok: # check that signed zeros also work as a displacement jr = (x0.real == 0) & (dx.real != 0) ji = (x0.imag == 0) & (dx.imag != 0) x = -x0 x.real[jr] = 0.*dx.real x.imag[ji] = 0.*dx.imag x = -x ym = f(x) ym = ym[jr | ji] y0 = y0[jr | ji] assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym) assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym) def test_copysign(): assert np.copysign(1, -1) == -1 old_err = np.seterr(divide="ignore") try: assert 1 / np.copysign(0, -1) < 0 assert 1 / np.copysign(0, 1) > 0 finally: np.seterr(**old_err) assert np.signbit(np.copysign(np.nan, -1)) assert not np.signbit(np.copysign(np.nan, 1)) def _test_nextafter(t): one = t(1) two = t(2) zero = t(0) eps = np.finfo(t).eps assert np.nextafter(one, two) - one == eps assert np.nextafter(one, zero) - one < 0 assert np.isnan(np.nextafter(np.nan, one)) assert np.isnan(np.nextafter(one, np.nan)) assert np.nextafter(one, one) == one def test_nextafter(): return _test_nextafter(np.float64) def test_nextafterf(): return _test_nextafter(np.float32) @dec.knownfailureif(sys.platform == 'win32', "Long double support buggy on win32") def test_nextafterl(): return _test_nextafter(np.longdouble) def _test_spacing(t): err = np.seterr(invalid='ignore') one = t(1) eps = np.finfo(t).eps nan = t(np.nan) inf = t(np.inf) try: assert np.spacing(one) == eps assert np.isnan(np.spacing(nan)) assert np.isnan(np.spacing(inf)) assert np.isnan(np.spacing(-inf)) assert np.spacing(t(1e30)) != 0 finally: np.seterr(**err) def test_spacing(): return _test_spacing(np.float64) def test_spacingf(): return _test_spacing(np.float32) @dec.knownfailureif(sys.platform == 'win32', "Long double support buggy on win32") def test_spacingl(): return _test_spacing(np.longdouble) def test_spacing_gfortran(): # Reference from this fortran file, built with gfortran 4.3.3 on linux # 32bits: # PROGRAM test_spacing # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) # # WRITE(*,*) spacing(0.00001_DBL) # WRITE(*,*) spacing(1.0_DBL) # WRITE(*,*) spacing(1000._DBL) # WRITE(*,*) spacing(10500._DBL) # # WRITE(*,*) spacing(0.00001_SGL) # WRITE(*,*) spacing(1.0_SGL) # WRITE(*,*) spacing(1000._SGL) # WRITE(*,*) spacing(10500._SGL) # END PROGRAM ref = {} ref[np.float64] = [1.69406589450860068E-021, 2.22044604925031308E-016, 1.13686837721616030E-013, 1.81898940354585648E-012] ref[np.float32] = [ 9.09494702E-13, 1.19209290E-07, 6.10351563E-05, 9.76562500E-04] for dt, dec in zip([np.float32, np.float64], (10, 20)): x = np.array([1e-5, 1, 1000, 10500], dtype=dt) assert_array_almost_equal(np.spacing(x), ref[dt], 
decimal=dec) def test_nextafter_vs_spacing(): # XXX: spacing does not handle long double yet for t in [np.float32, np.float64]: for _f in [1, 1e-5, 1000]: f = t(_f) f1 = t(_f + 1) assert np.nextafter(f, f1) - f == np.spacing(f) def test_pos_nan(): """Check np.nan is a positive nan.""" assert np.signbit(np.nan) == 0 def test_reduceat(): """Test bug in reduceat when structured arrays are not copied.""" db = np.dtype([('name', 'S11'),('time', np.int64), ('value', np.float32)]) a = np.empty([100], dtype=db) a['name'] = 'Simple' a['time'] = 10 a['value'] = 100 indx = [0,7,15,25] h2 = [] val1 = indx[0] for val2 in indx[1:]: h2.append(np.add.reduce(a['value'][val1:val2])) val1 = val2 h2.append(np.add.reduce(a['value'][val1:])) h2 = np.array(h2) # test buffered -- this should work h1 = np.add.reduceat(a['value'], indx) assert_array_almost_equal(h1, h2) # This is when the error occurs. # test no buffer res = np.setbufsize(32) h1 = np.add.reduceat(a['value'], indx) np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) assert_array_almost_equal(h1, h2) if __name__ == "__main__": run_module_suite()
bsd-3-clause
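The tests in the file above repeatedly bracket NaN/inf checks with np.seterr(invalid='ignore') followed by np.seterr(**old) so that invalid-operand warnings cannot abort the assertions. A minimal, self-contained sketch of that save/restore pattern, plus the np.errstate context-manager form that expresses the same thing; the hypot_is_nan helpers are illustrative names, not part of the test file:

import numpy as np

def hypot_is_nan(x, y):
    # Explicit save/restore of the floating-point error state,
    # exactly as the test file does it.
    old = np.seterr(invalid='ignore')
    try:
        return np.isnan(np.hypot(x, y))
    finally:
        np.seterr(**old)

def hypot_is_nan_ctx(x, y):
    # Equivalent form using the errstate context manager.
    with np.errstate(invalid='ignore'):
        return np.isnan(np.hypot(x, y))

if __name__ == '__main__':
    assert hypot_is_nan(np.nan, 1.0)
    assert hypot_is_nan_ctx(np.nan, np.nan)
    assert not hypot_is_nan(3.0, 4.0)   # hypot(3, 4) == 5, not NaN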
eligoenergy/git-repo
subcmds/overview.py
83
2727
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from color import Coloring
from command import PagedCommand


class Overview(PagedCommand):
  common = True
  helpSummary = "Display overview of unmerged project branches"
  helpUsage = """
%prog [--current-branch] [<project>...]
"""
  helpDescription = """
The '%prog' command is used to display an overview of the projects branches,
and list any local commits that have not yet been merged into the project.

The -b/--current-branch option can be used to restrict the output to only
branches currently checked out in each project.  By default, all branches
are displayed.
"""

  def _Options(self, p):
    p.add_option('-b', '--current-branch',
                 dest="current_branch", action="store_true",
                 help="Consider only checked out branches")

  def Execute(self, opt, args):
    all_branches = []
    for project in self.GetProjects(args):
      br = [project.GetUploadableBranch(x)
            for x in project.GetBranches()]
      br = [x for x in br if x]
      if opt.current_branch:
        br = [x for x in br if x.name == project.CurrentBranch]
      all_branches.extend(br)

    if not all_branches:
      return

    class Report(Coloring):
      def __init__(self, config):
        Coloring.__init__(self, config, 'status')
        self.project = self.printer('header', attr='bold')
        self.text = self.printer('text')

    out = Report(all_branches[0].project.config)
    out.text("Deprecated. See repo info -o.")
    out.nl()
    out.project('Projects Overview')
    out.nl()

    project = None

    for branch in all_branches:
      if project != branch.project:
        project = branch.project
        out.nl()
        out.project('project %s/' % project.relpath)
        out.nl()

      commits = branch.commits
      date = branch.date
      print('%s %-33s (%2d commit%s, %s)' % (
        branch.name == project.CurrentBranch and '*' or ' ',
        branch.name,
        len(commits),
        len(commits) != 1 and 's' or ' ',
        date))
      for commit in commits:
        print('%-35s - %s' % ('', commit))
apache-2.0
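The core of Overview.Execute above is a grouping loop: uploadable branches are collected per project, a project header is emitted whenever the project changes, and the currently checked-out branch is starred. A standalone sketch of that loop with plain data; the Branch namedtuple is a stand-in for repo's real branch objects, not repo's API:

from collections import namedtuple

Branch = namedtuple('Branch', 'project name current commits date')

def print_overview(branches):
    # Branches are assumed to arrive grouped by project, as repo produces them.
    project = None
    for b in branches:
        if b.project != project:
            project = b.project
            print('\nproject %s/' % project)
        star = '*' if b.current else ' '
        plural = 's' if len(b.commits) != 1 else ' '
        print('%s %-33s (%2d commit%s, %s)' % (
            star, b.name, len(b.commits), plural, b.date))
        for commit in b.commits:
            print('%-35s - %s' % ('', commit))

if __name__ == '__main__':
    print_overview([
        Branch('platform/build', 'fix-typo', True,
               ['1c0e7a2 Fix typo'], '2 days ago'),
        Branch('platform/build', 'feature-x', False,
               ['9ab10ff Add X', '77d0c11 Wire up X'], '5 days ago'),
    ])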
cfei18/incubator-airflow
tests/www/api/experimental/test_endpoints.py
15
11876
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import timedelta import json import unittest from urllib.parse import quote_plus from airflow import configuration from airflow.api.common.experimental.trigger_dag import trigger_dag from airflow.models import DagBag, DagModel, DagRun, Pool, TaskInstance from airflow.settings import Session from airflow.utils.timezone import datetime, utcnow from airflow.www import app as application class TestApiExperimental(unittest.TestCase): @classmethod def setUpClass(cls): super(TestApiExperimental, cls).setUpClass() session = Session() session.query(DagRun).delete() session.query(TaskInstance).delete() session.commit() session.close() def setUp(self): super(TestApiExperimental, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) self.app = app.test_client() def tearDown(self): session = Session() session.query(DagRun).delete() session.query(TaskInstance).delete() session.commit() session.close() super(TestApiExperimental, self).tearDown() def test_task_info(self): url_template = '/api/experimental/dags/{}/tasks/{}' response = self.app.get( url_template.format('example_bash_operator', 'runme_0') ) self.assertIn('"email"', response.data.decode('utf-8')) self.assertNotIn('error', response.data.decode('utf-8')) self.assertEqual(200, response.status_code) response = self.app.get( url_template.format('example_bash_operator', 'DNE') ) self.assertIn('error', response.data.decode('utf-8')) self.assertEqual(404, response.status_code) response = self.app.get( url_template.format('DNE', 'DNE') ) self.assertIn('error', response.data.decode('utf-8')) self.assertEqual(404, response.status_code) def test_trigger_dag(self): url_template = '/api/experimental/dags/{}/dag_runs' response = self.app.post( url_template.format('example_bash_operator'), data=json.dumps({'run_id': 'my_run' + utcnow().isoformat()}), content_type="application/json" ) self.assertEqual(200, response.status_code) response = self.app.post( url_template.format('does_not_exist_dag'), data=json.dumps({}), content_type="application/json" ) self.assertEqual(404, response.status_code) def test_delete_dag(self): url_template = '/api/experimental/dags/{}' from airflow import settings session = settings.Session() key = "my_dag_id" session.add(DagModel(dag_id=key)) session.commit() response = self.app.delete( url_template.format(key), content_type="application/json" ) self.assertEqual(200, response.status_code) response = self.app.delete( url_template.format('does_not_exist_dag'), content_type="application/json" ) self.assertEqual(404, response.status_code) def test_trigger_dag_for_date(self): url_template = '/api/experimental/dags/{}/dag_runs' dag_id = 'example_bash_operator' hour_from_now = utcnow() + 
timedelta(hours=1) execution_date = datetime(hour_from_now.year, hour_from_now.month, hour_from_now.day, hour_from_now.hour) datetime_string = execution_date.isoformat() # Test Correct execution response = self.app.post( url_template.format(dag_id), data=json.dumps({'execution_date': datetime_string}), content_type="application/json" ) self.assertEqual(200, response.status_code) dagbag = DagBag() dag = dagbag.get_dag(dag_id) dag_run = dag.get_dagrun(execution_date) self.assertTrue(dag_run, 'Dag Run not found for execution date {}' .format(execution_date)) # Test error for nonexistent dag response = self.app.post( url_template.format('does_not_exist_dag'), data=json.dumps({'execution_date': execution_date.isoformat()}), content_type="application/json" ) self.assertEqual(404, response.status_code) # Test error for bad datetime format response = self.app.post( url_template.format(dag_id), data=json.dumps({'execution_date': 'not_a_datetime'}), content_type="application/json" ) self.assertEqual(400, response.status_code) def test_task_instance_info(self): url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}' dag_id = 'example_bash_operator' task_id = 'also_run_this' execution_date = utcnow().replace(microsecond=0) datetime_string = quote_plus(execution_date.isoformat()) wrong_datetime_string = quote_plus( datetime(1990, 1, 1, 1, 1, 1).isoformat() ) # Create DagRun trigger_dag(dag_id=dag_id, run_id='test_task_instance_info_run', execution_date=execution_date) # Test Correct execution response = self.app.get( url_template.format(dag_id, datetime_string, task_id) ) self.assertEqual(200, response.status_code) self.assertIn('state', response.data.decode('utf-8')) self.assertNotIn('error', response.data.decode('utf-8')) # Test error for nonexistent dag response = self.app.get( url_template.format('does_not_exist_dag', datetime_string, task_id), ) self.assertEqual(404, response.status_code) self.assertIn('error', response.data.decode('utf-8')) # Test error for nonexistent task response = self.app.get( url_template.format(dag_id, datetime_string, 'does_not_exist_task') ) self.assertEqual(404, response.status_code) self.assertIn('error', response.data.decode('utf-8')) # Test error for nonexistent dag run (wrong execution_date) response = self.app.get( url_template.format(dag_id, wrong_datetime_string, task_id) ) self.assertEqual(404, response.status_code) self.assertIn('error', response.data.decode('utf-8')) # Test error for bad datetime format response = self.app.get( url_template.format(dag_id, 'not_a_datetime', task_id) ) self.assertEqual(400, response.status_code) self.assertIn('error', response.data.decode('utf-8')) class TestPoolApiExperimental(unittest.TestCase): @classmethod def setUpClass(cls): super(TestPoolApiExperimental, cls).setUpClass() session = Session() session.query(Pool).delete() session.commit() session.close() def setUp(self): super(TestPoolApiExperimental, self).setUp() configuration.load_test_config() app = application.create_app(testing=True) self.app = app.test_client() self.session = Session() self.pools = [] for i in range(2): name = 'experimental_%s' % (i + 1) pool = Pool( pool=name, slots=i, description=name, ) self.session.add(pool) self.pools.append(pool) self.session.commit() self.pool = self.pools[0] def tearDown(self): self.session.query(Pool).delete() self.session.commit() self.session.close() super(TestPoolApiExperimental, self).tearDown() def _get_pool_count(self): response = self.app.get('/api/experimental/pools') self.assertEqual(response.status_code, 
200) return len(json.loads(response.data.decode('utf-8'))) def test_get_pool(self): response = self.app.get( '/api/experimental/pools/{}'.format(self.pool.pool), ) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.data.decode('utf-8')), self.pool.to_json()) def test_get_pool_non_existing(self): response = self.app.get('/api/experimental/pools/foo') self.assertEqual(response.status_code, 404) self.assertEqual(json.loads(response.data.decode('utf-8'))['error'], "Pool 'foo' doesn't exist") def test_get_pools(self): response = self.app.get('/api/experimental/pools') self.assertEqual(response.status_code, 200) pools = json.loads(response.data.decode('utf-8')) self.assertEqual(len(pools), 2) for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])): self.assertDictEqual(pool, self.pools[i].to_json()) def test_create_pool(self): response = self.app.post( '/api/experimental/pools', data=json.dumps({ 'name': 'foo', 'slots': 1, 'description': '', }), content_type='application/json', ) self.assertEqual(response.status_code, 200) pool = json.loads(response.data.decode('utf-8')) self.assertEqual(pool['pool'], 'foo') self.assertEqual(pool['slots'], 1) self.assertEqual(pool['description'], '') self.assertEqual(self._get_pool_count(), 3) def test_create_pool_with_bad_name(self): for name in ('', ' '): response = self.app.post( '/api/experimental/pools', data=json.dumps({ 'name': name, 'slots': 1, 'description': '', }), content_type='application/json', ) self.assertEqual(response.status_code, 400) self.assertEqual( json.loads(response.data.decode('utf-8'))['error'], "Pool name shouldn't be empty", ) self.assertEqual(self._get_pool_count(), 2) def test_delete_pool(self): response = self.app.delete( '/api/experimental/pools/{}'.format(self.pool.pool), ) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.data.decode('utf-8')), self.pool.to_json()) self.assertEqual(self._get_pool_count(), 1) def test_delete_pool_non_existing(self): response = self.app.delete( '/api/experimental/pools/foo', ) self.assertEqual(response.status_code, 404) self.assertEqual(json.loads(response.data.decode('utf-8'))['error'], "Pool 'foo' doesn't exist") if __name__ == '__main__': unittest.main()
apache-2.0
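The Airflow tests above follow the standard Flask test-client pattern: build the application, take app.test_client(), drive endpoints with get/post, and assert on status codes and decoded JSON bodies. A stripped-down sketch of that pattern against a tiny hypothetical app; the /api/experimental/pools route here is a toy, not Airflow's implementation, and only the client calls (get, post, data, content_type, status_code) mirror the tests:

import json
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/api/experimental/pools', methods=['GET', 'POST'])
def pools():
    if request.method == 'POST':
        body = request.get_json()
        if not body.get('name', '').strip():
            # Mirror the "empty pool name" error path exercised by the tests.
            return jsonify(error="Pool name shouldn't be empty"), 400
        return jsonify(pool=body['name'], slots=body.get('slots', 0))
    return jsonify([])

if __name__ == '__main__':
    client = app.test_client()
    resp = client.get('/api/experimental/pools')
    assert resp.status_code == 200
    resp = client.post('/api/experimental/pools',
                       data=json.dumps({'name': '', 'slots': 1}),
                       content_type='application/json')
    assert resp.status_code == 400
    assert 'error' in json.loads(resp.data.decode('utf-8'))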
JakeBrand/CMPUT410-E3
lab4/lib/python2.7/site-packages/jinja2/testsuite/debug.py
415
1935
# -*- coding: utf-8 -*-
"""
    jinja2.testsuite.debug
    ~~~~~~~~~~~~~~~~~~~~~~

    Tests the debug system.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from jinja2.testsuite import JinjaTestCase, filesystem_loader

from jinja2 import Environment, TemplateSyntaxError

env = Environment(loader=filesystem_loader)


class DebugTestCase(JinjaTestCase):

    def test_runtime_error(self):
        def test():
            tmpl.render(fail=lambda: 1 / 0)
        tmpl = env.get_template('broken.html')
        self.assert_traceback_matches(test, r'''
  File ".*?broken.html", line 2, in (top-level template code|<module>)
    \{\{ fail\(\) \}\}
  File ".*?debug.pyc?", line \d+, in <lambda>
    tmpl\.render\(fail=lambda: 1 / 0\)
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
''')

    def test_syntax_error(self):
        # XXX: the .*? is necessary for python3 which does not hide
        # some of the stack frames we don't want to show.  Not sure
        # what's up with that, but that is not that critical.  Should
        # be fixed though.
        self.assert_traceback_matches(lambda: env.get_template('syntaxerror.html'), r'''(?sm)
  File ".*?syntaxerror.html", line 4, in (template|<module>)
    \{% endif %\}.*?
(jinja2\.exceptions\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja was looking for the following tags: 'endfor' or 'else'. The innermost block that needs to be closed is 'for'.
    ''')

    def test_regular_syntax_error(self):
        def test():
            raise TemplateSyntaxError('wtf', 42)
        self.assert_traceback_matches(test, r'''
  File ".*debug.pyc?", line \d+, in test
    raise TemplateSyntaxError\('wtf', 42\)
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
  line 42''')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(DebugTestCase))
    return suite
apache-2.0
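assert_traceback_matches in the tests above boils down to: run a callable, expect it to raise, capture the formatted traceback, and match it against a regular expression. A self-contained approximation of that idea, independent of Jinja's actual JinjaTestCase helper:

import re
import traceback

def traceback_matches(func, pattern):
    """Run func, expect it to raise, and regex-match the traceback text."""
    try:
        func()
    except Exception:
        tb = traceback.format_exc()
        return re.search(pattern, tb) is not None
    raise AssertionError('expected an exception')

if __name__ == '__main__':
    # The traceback for 1 / 0 ends in a ZeroDivisionError line.
    assert traceback_matches(lambda: 1 / 0,
                             r'ZeroDivisionError: .*division')
    # A ValueError traceback should not match the ZeroDivisionError pattern.
    assert not traceback_matches(lambda: int('x'),
                                 r'ZeroDivisionError')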
halberom/ansible-modules-extras
cloud/cloudstack/cs_loadbalancer_rule.py
31
11531
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, Darren Worrall <[email protected]> # (c) 2015, René Moser <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: cs_loadbalancer_rule short_description: Manages load balancer rules on Apache CloudStack based clouds. description: - Add, update and remove load balancer rules. version_added: '2.0' author: - "Darren Worrall (@dazworrall)" - "René Moser (@resmo)" options: name: description: - The name of the load balancer rule. required: true description: description: - The description of the load balancer rule. required: false default: null algorithm: description: - Load balancer algorithm - Required when using C(state=present). required: false choices: [ 'source', 'roundrobin', 'leastconn' ] default: 'source' private_port: description: - The private port of the private ip address/virtual machine where the network traffic will be load balanced to. - Required when using C(state=present). - Can not be changed once the rule exists due API limitation. required: false default: null public_port: description: - The public port from where the network traffic will be load balanced from. - Required when using C(state=present). - Can not be changed once the rule exists due API limitation. required: true default: null ip_address: description: - Public IP address from where the network traffic will be load balanced from. required: true aliases: [ 'public_ip' ] open_firewall: description: - Whether the firewall rule for public port should be created, while creating the new rule. - Use M(cs_firewall) for managing firewall rules. required: false default: false cidr: description: - CIDR (full notation) to be used for firewall rule if required. required: false default: null protocol: description: - The protocol to be used on the load balancer required: false default: null project: description: - Name of the project the load balancer IP address is related to. required: false default: null state: description: - State of the rule. required: true default: 'present' choices: [ 'present', 'absent' ] domain: description: - Domain the rule is related to. required: false default: null account: description: - Account the rule is related to. required: false default: null zone: description: - Name of the zone in which the rule shoud be created. - If not set, default zone is used. 
required: false default: null extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # Create a load balancer rule - local_action: module: cs_loadbalancer_rule name: balance_http public_ip: 1.2.3.4 algorithm: leastconn public_port: 80 private_port: 8080 # update algorithm of an existing load balancer rule - local_action: module: cs_loadbalancer_rule name: balance_http public_ip: 1.2.3.4 algorithm: roundrobin public_port: 80 private_port: 8080 # Delete a load balancer rule - local_action: module: cs_loadbalancer_rule name: balance_http public_ip: 1.2.3.4 state: absent ''' RETURN = ''' --- id: description: UUID of the rule. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f zone: description: Name of zone the rule is related to. returned: success type: string sample: ch-gva-2 project: description: Name of project the rule is related to. returned: success type: string sample: Production account: description: Account the rule is related to. returned: success type: string sample: example account domain: description: Domain the rule is related to. returned: success type: string sample: example domain algorithm: description: Load balancer algorithm used. returned: success type: string sample: "source" cidr: description: CIDR to forward traffic from. returned: success type: string sample: "" name: description: Name of the rule. returned: success type: string sample: "http-lb" description: description: Description of the rule. returned: success type: string sample: "http load balancer rule" protocol: description: Protocol of the rule. returned: success type: string sample: "tcp" public_port: description: Public port. returned: success type: string sample: 80 private_port: description: Private IP address. returned: success type: string sample: 80 public_ip: description: Public IP address. returned: success type: string sample: "1.2.3.4" tags: description: List of resource tags associated with the rule. returned: success type: dict sample: '[ { "key": "foo", "value": "bar" } ]' state: description: State of the rule. 
returned: success type: string sample: "Add" ''' try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True except ImportError: has_lib_cs = False # import cloudstack common from ansible.module_utils.cloudstack import * class AnsibleCloudStackLBRule(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackLBRule, self).__init__(module) self.returns = { 'publicip': 'public_ip', 'algorithm': 'algorithm', 'cidrlist': 'cidr', 'protocol': 'protocol', } # these values will be casted to int self.returns_to_int = { 'publicport': 'public_port', 'privateport': 'private_port', } def get_rule(self, **kwargs): rules = self.cs.listLoadBalancerRules(**kwargs) if rules: return rules['loadbalancerrule'][0] def _get_common_args(self): return { 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), 'zoneid': self.get_zone(key='id'), 'publicipid': self.get_ip_address(key='id'), 'name': self.module.params.get('name'), } def present_lb_rule(self): missing_params = [] for required_params in [ 'algorithm', 'private_port', 'public_port', ]: if not self.module.params.get(required_params): missing_params.append(required_params) if missing_params: self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) args = self._get_common_args() rule = self.get_rule(**args) if rule: rule = self._update_lb_rule(rule) else: rule = self._create_lb_rule(rule) if rule: rule = self.ensure_tags(resource=rule, resource_type='LoadBalancer') return rule def _create_lb_rule(self, rule): self.result['changed'] = True if not self.module.check_mode: args = self._get_common_args() args['algorithm'] = self.module.params.get('algorithm') args['privateport'] = self.module.params.get('private_port') args['publicport'] = self.module.params.get('public_port') args['cidrlist'] = self.module.params.get('cidr') args['description'] = self.module.params.get('description') args['protocol'] = self.module.params.get('protocol') res = self.cs.createLoadBalancerRule(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: rule = self.poll_job(res, 'loadbalancer') return rule def _update_lb_rule(self, rule): args = {} args['id'] = rule['id'] args['algorithm'] = self.module.params.get('algorithm') args['description'] = self.module.params.get('description') if self.has_changed(args, rule): self.result['changed'] = True if not self.module.check_mode: res = self.cs.updateLoadBalancerRule(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: rule = self.poll_job(res, 'loadbalancer') return rule def absent_lb_rule(self): args = self._get_common_args() rule = self.get_rule(**args) if rule: self.result['changed'] = True if rule and not self.module.check_mode: res = self.cs.deleteLoadBalancerRule(id=rule['id']) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: res = self._poll_job(res, 'loadbalancer') return rule def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( name = dict(required=True), description = dict(default=None), algorithm = dict(choices=['source', 'roundrobin', 'leastconn'], default='source'), private_port = dict(type='int', default=None), public_port = dict(type='int', default=None), protocol = 
dict(default=None), state = dict(choices=['present', 'absent'], default='present'), ip_address = dict(required=True, aliases=['public_ip']), cidr = dict(default=None), project = dict(default=None), open_firewall = dict(type='bool', default=False), tags = dict(type='list', aliases=['tag'], default=None), zone = dict(default=None), domain = dict(default=None), account = dict(default=None), poll_async = dict(type='bool', default=True), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), supports_check_mode=True ) if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") try: acs_lb_rule = AnsibleCloudStackLBRule(module) state = module.params.get('state') if state in ['absent']: rule = acs_lb_rule.absent_lb_rule() else: rule = acs_lb_rule.present_lb_rule() result = acs_lb_rule.get_result(rule) except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
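The update path in the module above (has_changed plus _update_lb_rule) follows a common idempotency pattern: compare the desired arguments with the existing resource, and only report a change, and only call the API, when something actually differs and check mode is off. A rough standalone sketch of that control flow with the CloudStack client replaced by a stub; names like update_rule are illustrative, not Ansible's or the module's API:

def has_changed(args, resource):
    """True if any non-None desired value differs from the existing resource."""
    return any(v is not None and resource.get(k) != v
               for k, v in args.items())

def update_rule(api, rule, algorithm=None, description=None, check_mode=False):
    args = {'algorithm': algorithm, 'description': description}
    changed = has_changed(args, rule)
    if changed and not check_mode:
        # Only send the fields the caller actually supplied.
        rule = api('updateLoadBalancerRule', id=rule['id'],
                   **{k: v for k, v in args.items() if v is not None})
    return rule, changed

if __name__ == '__main__':
    fake_api = lambda name, **kw: dict(kw)          # stand-in for the CloudStack client
    rule = {'id': 'lb-1', 'algorithm': 'source', 'description': ''}
    _, changed = update_rule(fake_api, rule, algorithm='source')
    assert not changed                              # nothing differs, no API call
    _, changed = update_rule(fake_api, rule, algorithm='leastconn')
    assert changed                                  # algorithm differs, rule updated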
sujeet4github/MyLangUtils
LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/sqlalchemy/ext/serializer.py
32
5586
# ext/serializer.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Serializer/Deserializer objects for usage with SQLAlchemy query structures, allowing "contextual" deserialization. Any SQLAlchemy query structure, either based on sqlalchemy.sql.* or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session etc. which are referenced by the structure are not persisted in serialized form, but are instead re-associated with the query structure when it is deserialized. Usage is nearly the same as that of the standard Python pickle module:: from sqlalchemy.ext.serializer import loads, dumps metadata = MetaData(bind=some_engine) Session = scoped_session(sessionmaker()) # ... define mappers query = Session.query(MyClass). filter(MyClass.somedata=='foo').order_by(MyClass.sortkey) # pickle the query serialized = dumps(query) # unpickle. Pass in metadata + scoped_session query2 = loads(serialized, metadata, Session) print query2.all() Similar restrictions as when using raw pickle apply; mapped classes must be themselves be pickleable, meaning they are importable from a module-level namespace. The serializer module is only appropriate for query structures. It is not needed for: * instances of user-defined classes. These contain no references to engines, sessions or expression constructs in the typical case and can be serialized directly. * Table metadata that is to be loaded entirely from the serialized structure (i.e. is not already declared in the application). Regular pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object, typically one which was reflected from an existing database at some previous point in time. The serializer module is specifically for the opposite case, where the Table metadata is already present in memory. """ from ..orm import class_mapper from ..orm.session import Session from ..orm.mapper import Mapper from ..orm.interfaces import MapperProperty from ..orm.attributes import QueryableAttribute from .. 
import Table, Column from ..engine import Engine from ..util import pickle, byte_buffer, b64encode, b64decode, text_type import re __all__ = ['Serializer', 'Deserializer', 'dumps', 'loads'] def Serializer(*args, **kw): pickler = pickle.Pickler(*args, **kw) def persistent_id(obj): # print "serializing:", repr(obj) if isinstance(obj, QueryableAttribute): cls = obj.impl.class_ key = obj.impl.key id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls)) elif isinstance(obj, Mapper) and not obj.non_primary: id = "mapper:" + b64encode(pickle.dumps(obj.class_)) elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \ ":" + obj.key elif isinstance(obj, Table): id = "table:" + text_type(obj.key) elif isinstance(obj, Column) and isinstance(obj.table, Table): id = "column:" + \ text_type(obj.table.key) + ":" + text_type(obj.key) elif isinstance(obj, Session): id = "session:" elif isinstance(obj, Engine): id = "engine:" else: return None return id pickler.persistent_id = persistent_id return pickler our_ids = re.compile( r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)') def Deserializer(file, metadata=None, scoped_session=None, engine=None): unpickler = pickle.Unpickler(file) def get_engine(): if engine: return engine elif scoped_session and scoped_session().bind: return scoped_session().bind elif metadata and metadata.bind: return metadata.bind else: return None def persistent_load(id): m = our_ids.match(text_type(id)) if not m: return None else: type_, args = m.group(1, 2) if type_ == 'attribute': key, clsarg = args.split(":") cls = pickle.loads(b64decode(clsarg)) return getattr(cls, key) elif type_ == "mapper": cls = pickle.loads(b64decode(args)) return class_mapper(cls) elif type_ == "mapperprop": mapper, keyname = args.split(':') cls = pickle.loads(b64decode(mapper)) return class_mapper(cls).attrs[keyname] elif type_ == "table": return metadata.tables[args] elif type_ == "column": table, colname = args.split(':') return metadata.tables[table].c[colname] elif type_ == "session": return scoped_session() elif type_ == "engine": return get_engine() else: raise Exception("Unknown token: %s" % type_) unpickler.persistent_load = persistent_load return unpickler def dumps(obj, protocol=0): buf = byte_buffer() pickler = Serializer(buf, protocol) pickler.dump(obj) return buf.getvalue() def loads(data, metadata=None, scoped_session=None, engine=None): buf = byte_buffer(data) unpickler = Deserializer(buf, metadata, scoped_session, engine) return unpickler.load()
gpl-3.0
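The serializer module above is built on pickle's persistent-ID hooks: persistent_id turns engines, sessions, tables and mapped attributes into short string tokens at dump time, and persistent_load resolves those tokens back to live objects at load time. A stripped-down illustration of that pickle mechanism using a dummy registry instead of SQLAlchemy objects:

import io
import pickle

REGISTRY = {'engine:default': object()}   # stands in for a live Engine

class TokenPickler(pickle.Pickler):
    def persistent_id(self, obj):
        # Return a token for registered objects; None means "pickle normally".
        for token, known in REGISTRY.items():
            if obj is known:
                return token
        return None

class TokenUnpickler(pickle.Unpickler):
    def persistent_load(self, pid):
        # Resolve the token back to the live object at load time.
        return REGISTRY[pid]

if __name__ == '__main__':
    payload = {'query': 'SELECT 1', 'bind': REGISTRY['engine:default']}
    buf = io.BytesIO()
    TokenPickler(buf).dump(payload)
    restored = TokenUnpickler(io.BytesIO(buf.getvalue())).load()
    # The 'bind' reference resolves to the very same registry object,
    # which is the effect the SQLAlchemy serializer relies on.
    assert restored['bind'] is REGISTRY['engine:default']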
bobintetley/asm3
src/asm3/locales/locale_th.py
1
126059
# th.po val = {" days." : "", "(all)" : "", "(any)" : "", "(anyone)" : "(&#3652;&#3617;&#3656;&#3617;&#3637;)", "(available)" : "", "(blank)" : "", "(both)" : "", "(everyone)" : "", "(master user, not editable)" : "", "(no change)" : "", "(no deduction)" : "", "(none)" : "(&#3652;&#3617;&#3656;&#3617;&#3637;)", "(unknown)" : "", "(use system)" : "", "({0} given, {1} remaining)" : "", "1 treatment" : "", "1 week" : "1 &#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "1 year" : "", "2 weeks" : "2 &#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "3 months" : "&#3648;&#3604;&#3639;&#3629;&#3609;", "4 weeks" : "4 &#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "5 Year" : "", "6 months" : "&#3648;&#3604;&#3639;&#3629;&#3609;", "6 weeks" : "6 &#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "8 weeks" : "8 &#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "9 months" : "&#3648;&#3604;&#3639;&#3629;&#3609;", "A (Stray Dog)" : "", "A description or other information about the animal" : "", "A list of areas this person will homecheck - eg: S60 S61" : "", "A movement must have a reservation date or type." : "", "A person is required for this movement type." : "", "A publish job is already running." : "", "A short version of the reference number" : "", "A task is already running." : "", "A unique number to identify this movement" : "", "A unique reference for this litter" : "", "A4" : "", "ACO" : "", "AM" : "", "ASM" : "", "ASM 3 is compatible with your iPad and other tablets." : "", "ASM News" : "", "ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "", "ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "", "ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "", "Abandoned" : "", "Abuse" : "", "Abyssinian" : "", "Access System Menu" : "", "Account" : "", "Account Types" : "", "Account code '{0}' has already been used." : "", "Account code '{0}' is not valid." : "", "Account code cannot be blank." : "", "Account disabled." : "", "Accountant" : "", "Accounts" : "", "Accounts need a code." 
: "", "Active" : "", "Active Incidents" : "", "Active Trap Loans" : "", "Active users: {0}" : "", "Add" : "", "Add Accounts" : "", "Add Animal" : "&#3648;&#3614;&#3636;&#3656;&#3617;&#3626;&#3633;&#3605;&#3623;&#3660;", "Add Animals" : "&#3648;&#3614;&#3636;&#3656;&#3617;&#3626;&#3633;&#3605;&#3623;&#3660;", "Add Appointment" : "", "Add Call" : "", "Add Citations" : "", "Add Clinic Appointment" : "", "Add Cost" : "", "Add Diary" : "", "Add Diets" : "", "Add Document to Repository" : "", "Add Flag" : "", "Add Found Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add Incidents" : "", "Add Investigation" : "", "Add Invoice Item" : "", "Add Licenses" : "", "Add Litter" : "", "Add Log" : "", "Add Log to Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add Lost Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add Media" : "", "Add Medical Records" : "", "Add Message" : "", "Add Movement" : "", "Add Payments" : "", "Add Person" : "", "Add Report" : "", "Add Rota" : "", "Add Stock" : "", "Add Tests" : "", "Add Transport" : "", "Add Trap Loans" : "", "Add Users" : "", "Add Vaccinations" : "", "Add Vouchers" : "", "Add Waiting List" : "&#3604;&#3641;&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3619;&#3629;", "Add a diary note" : "", "Add a found animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add a log entry" : "", "Add a lost animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add a medical regimen" : "", "Add a new animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add a new log" : "", "Add a new person" : "", "Add a person" : "", "Add a photo" : "", "Add a test" : "", "Add a vaccination" : "", "Add account" : "", "Add additional field" : "", "Add an animal to the waiting list" : "", "Add citation" : "", "Add cost" : "", "Add details of this email to the log after sending" : "", "Add diary" : "", "Add diary task" : "", "Add diet" : "", "Add extra images for use in reports and documents" : "", "Add form field" : "", "Add found animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add investigation" : "", "Add license" : "", "Add litter" : "", "Add log" : "", "Add lost animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Add medical profile" : "", "Add medical regimen" : "", "Add message" : "", "Add movement" : "", "Add online form" : "", "Add payment" : "", "Add person" : "", "Add report" : "", "Add role" : "", "Add rota item" : "", "Add stock" : "", "Add template" : "", "Add test" : "", "Add this text to all animal descriptions" : "", "Add to log" : "", "Add transport" : "", "Add trap loan" : "", "Add user" : "", "Add vaccination" : "", "Add voucher" : "", "Add waiting list" : "&#3604;&#3641;&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3619;&#3629;", "Add {0}" : "", "Added" : "", "Added by {0} on {1}" : "", "Additional" : "&#3648;&#3614;&#3636;&#3656;&#3617;&#3648;&#3605;&#3636;&#3617;", "Additional Fields" : "", "Additional date field '{0}' contains an invalid date." 
: "", "Additional fields" : "", "Additional fields need a name, label and type." : "", "Address" : "&#3607;&#3637;&#3656;&#3629;&#3618;&#3641;&#3656;", "Address Contains" : "", "Address contains" : "", "Administered" : "", "Administering Vet" : "", "Adopt" : "", "Adopt an animal" : "", "Adoptable" : "", "Adoptable Animal" : "", "Adoptable and published for the first time" : "", "Adopted" : "", "Adopted Animals" : "&#3648;&#3614;&#3636;&#3656;&#3617;&#3626;&#3633;&#3605;&#3623;&#3660;", "Adopted Transferred In {0}" : "", "Adoption" : "", "Adoption Coordinator" : "", "Adoption Coordinator and Fosterer" : "", "Adoption Event" : "", "Adoption Fee" : "", "Adoption Number" : "", "Adoption fee donations" : "", "Adoption movements must have a valid adoption date." : "", "Adoption successfully created." : "", "Adoptions {0}" : "", "Adult" : "", "Advanced" : "&#3629;&#3618;&#3656;&#3634;&#3591;&#3586;&#3633;&#3657;&#3609;&#3626;&#3641;&#3591;", "Advanced find animal screen defaults to on shelter" : "", "Affenpinscher" : "", "Afghan Hound" : "", "African Grey" : "", "After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "", "Age" : "&#3629;&#3634;&#3618;&#3640;", "Age Group" : "", "Age Group 1" : "", "Age Group 2" : "", "Age Group 3" : "", "Age Group 4" : "", "Age Group 5" : "", "Age Group 6" : "", "Age Group 7" : "", "Age Group 8" : "", "Age Groups" : "", "Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "", "Aged Between" : "", "Aged From" : "", "Aged To" : "", "Aggression" : "", "Airedale Terrier" : "", "Akbash" : "", "Akita" : "", "Alaskan Malamute" : "", "Alerts" : "", "All Animals" : "&#3648;&#3614;&#3636;&#3656;&#3617;&#3626;&#3633;&#3605;&#3623;&#3660;", "All On-Shelter Animals" : "&#3621;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;", "All Publishers" : "", "All accounts" : "", "All animal care officers on file." : "", "All animal shelters on file." : "", "All animals matching current publishing options." : "", "All animals on the shelter." : "", "All animals where the hold ends today." : "", "All animals who are currently held in case of reclaim." : "", "All animals who are currently quarantined." : "", "All animals who are flagged as not for adoption." : "", "All animals who have been on the shelter longer than {0} months." : "", "All animals who have not been microchipped" : "", "All banned owners on file." : "", "All diary notes" : "", "All donors on file." : "", "All drivers on file." : "", "All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "", "All fields should be completed." : "", "All fosterers on file." : "", "All homechecked owners on file." : "", "All homecheckers on file." : "", "All members on file." : "", "All notes upto today" : "", "All people on file." : "", "All retailers on file." : "", "All staff on file." : "", "All time" : "", "All vets on file." : "", "All volunteers on file." 
: "", "Allergies" : "", "Allow a fosterer to be selected" : "", "Allow an adoption coordinator to be selected" : "", "Allow creation of payments on the Move-Reserve screen" : "", "Allow drag and drop to move animals between locations" : "", "Allow duplicate license numbers" : "", "Allow duplicate microchip numbers" : "", "Allow overriding of the movement number on the Move menu screens" : "", "Allow use of OpenOffice document templates" : "", "Alphabetically A-Z" : "", "Alphabetically Z-A" : "", "Already Signed" : "", "Already fostered to this person." : "", "Altered" : "", "Altered Date" : "", "Altered Dog - 1 year" : "", "Altered Dog - 3 year" : "", "Altering Vet" : "", "Always show an emblem to indicate the current location" : "", "Amazon" : "", "Amber" : "", "American" : "&#3629;&#3648;&#3617;&#3619;&#3636;&#3585;&#3633;&#3609;", "American Bulldog" : "", "American Curl" : "", "American Eskimo Dog" : "", "American Fuzzy Lop" : "", "American Sable" : "", "American Shorthair" : "", "American Staffordshire Terrier" : "", "American Water Spaniel" : "", "American Wirehair" : "", "Amount" : "", "An age in years, eg: 1, 0.5" : "", "An animal cannot have multiple open movements." : "", "An optional comma separated list of email addresses to send the output of this report to" : "", "Anatolian Shepherd" : "", "Angora Rabbit" : "", "Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;", "Animal '{0}' created with code {1}" : "", "Animal '{0}' successfully marked deceased." : "", "Animal (optional)" : "", "Animal (via animalname field)" : "", "Animal - Additional" : "", "Animal - Death" : "", "Animal - Details" : "", "Animal - Entry" : "", "Animal - Health and Identification" : "", "Animal - Notes" : "", "Animal Codes" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3586;&#3629;&#3591;&#3626;&#3633;&#3605;&#3623;&#3660;", "Animal Control" : "", "Animal Control Caller" : "", "Animal Control Incident" : "", "Animal Control Officer" : "", "Animal Control Victim" : "", "Animal Emblems" : "", "Animal Flags" : "", "Animal Links" : "", "Animal Name" : "&#3594;&#3639;&#3656;&#3629;&#3626;&#3633;&#3605;&#3623;&#3660;", "Animal Selection" : "", "Animal Shelter Manager" : "", "Animal Shelter Manager Login" : "", "Animal Sponsorship" : "", "Animal Type" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3586;&#3629;&#3591;&#3626;&#3633;&#3605;&#3623;&#3660;", "Animal Types" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3586;&#3629;&#3591;&#3626;&#3633;&#3605;&#3623;&#3660;", "Animal board costs" : "", "Animal cannot be deceased before it was brought to the shelter" : "", "Animal code format" : "", "Animal comments MUST contain this phrase in order to match." : "", "Animal control calendar" : "", "Animal control incidents matching '{0}'." : "", "Animal defecation" : "", "Animal descriptions" : "", "Animal destroyed" : "", "Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "", "Animal food costs" : "", "Animal picked up" : "", "Animal shortcode format" : "", "Animals" : "&#3626;&#3633;&#3605;&#3623;&#3660;", "Animals at large" : "", "Animals left in vehicle" : "", "Animals matching '{0}'." : "", "Animals per page" : "", "Annual" : "&#3619;&#3634;&#3618;&#3611;&#3637;", "Annually" : "&#3619;&#3634;&#3618;&#3611;&#3637;", "Anonymize" : "", "Anonymize personal data after this many years" : "", "Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." 
: "", "Any health problems the animal has" : "", "Any information about the animal" : "", "Any markings or distinguishing features the animal has" : "", "Appaloosa" : "", "Appenzell Mountain Dog" : "", "Applehead Siamese" : "", "Appointment" : "", "Appointment date must be a valid date" : "", "Appointment {0}. {1} on {2} for {3}" : "", "Appointments need a date and time." : "", "Approved" : "", "Apr" : "&#3648;&#3617;.&#3618;.", "April" : "&#3648;&#3617;&#3625;&#3634;&#3618;&#3609;", "Arabian" : "", "Area" : "", "Area Found" : "&#3648;&#3586;&#3605;&#3607;&#3637;&#3656;&#3614;&#3610;", "Area Lost" : "", "Area Postcode" : "", "Area where the animal was found" : "", "Area where the animal was lost" : "", "Areas" : "", "Arrived" : "", "Asset" : "", "Asset::Premises" : "", "At least the last name should be completed." : "", "Attach" : "", "Attach File" : "", "Attach Link" : "", "Attach a file" : "", "Attach a link to a web resource" : "", "Attach link" : "", "Audit Trail" : "", "Aug" : "&#3626;.&#3588;.", "August" : "&#3626;&#3636;&#3591;&#3627;&#3634;&#3588;&#3617;", "Australian Cattle Dog/Blue Heeler" : "", "Australian Kelpie" : "", "Australian Shepherd" : "", "Australian Terrier" : "", "Auto log users out after this many minutes of inactivity" : "", "Auto removed due to lack of owner contact." : "", "Automatically cancel any outstanding reservations on an animal when it is adopted" : "", "Automatically remove" : "", "Automatically return any outstanding foster movements on an animal when it is adopted" : "", "Automatically return any outstanding foster movements on an animal when it is transferred" : "", "Available for adoption" : "", "Available sheltermanager.com reports" : "", "B (Boarding Animal)" : "", "Baby" : "", "Balance" : "", "Balinese" : "", "Bank" : "&#3629;&#3633;&#3609;&#3604;&#3633;&#3610;", "Bank account interest" : "", "Bank current account" : "", "Bank deposit account" : "", "Bank savings account" : "", "Bank::Current" : "", "Bank::Deposit" : "", "Bank::Savings" : "", "Banned" : "", "Base Color" : "", "Basenji" : "", "Basset Hound" : "", "Batch" : "&#3617;&#3637;&#3609;&#3634;&#3588;&#3617;", "Batch Number" : "", "Beagle" : "", "Bearded Collie" : "", "Beauceron" : "", "Bedlington Terrier" : "", "Beginning of month" : "", "Belgian Hare" : "", "Belgian Shepherd Dog Sheepdog" : "", "Belgian Shepherd Laekenois" : "", "Belgian Shepherd Malinois" : "", "Belgian Shepherd Tervuren" : "", "Bengal" : "", "Bernese Mountain Dog" : "", "Beveren" : "", "Bichon Frise" : "", "Bird" : "&#3609;&#3585;", "Birman" : "", "Bite" : "&#3586;&#3609;&#3634;&#3604;", "Biting" : "", "Black" : "&#3604;&#3635;", "Black Labrador Retriever" : "", "Black Mouth Cur" : "", "Black Tortie" : "", "Black and Brindle" : "", "Black and Brown" : "&#3604;&#3635;&#3649;&#3621;&#3632;&#3609;&#3657;&#3635;&#3605;&#3634;&#3621;", "Black and Tan" : "", "Black and Tan Coonhound" : "", "Black and White" : "&#3604;&#3635;&#3649;&#3621;&#3632;&#3586;&#3634;&#3623;", "Bloodhound" : "", "Blue" : "&#3609;&#3657;&#3635;&#3648;&#3591;&#3636;&#3609;", "Blue Tortie" : "", "Bluetick Coonhound" : "", "Board and Food" : "", "Boarding" : "", "Boarding Cost" : "", "Boarding cost type" : "", "Bobtail" : "", "Body" : "", "Bombay" : "", "Bonded" : "", "Bonded With" : "", "Books" : "", "Border Collie" : "", "Border Terrier" : "", "Bordetella" : "", "Born in Shelter" : "", "Born on Foster {0}" : "", "Born on Shelter {0}" : "", "Borzoi" : "", "Boston Terrier" : "", "Both" : "", "Bouvier des Flanders" : "", "Boxer" : "", "Boykin Spaniel" : "", 
"Breed" : "&#3626;&#3634;&#3618;&#3614;&#3633;&#3609;&#3608;&#3640;&#3660;", "Breed to use when publishing to third party services and adoption sites" : "", "Breeds" : "&#3626;&#3634;&#3618;&#3614;&#3633;&#3609;&#3608;&#3640;&#3660;", "Briard" : "", "Brindle" : "", "Brindle and Black" : "", "Brindle and White" : "", "Britannia Petite" : "", "British Shorthair" : "", "Brittany Spaniel" : "", "Brotogeris" : "", "Brought In" : "", "Brought In By" : "", "Brown" : "&#3609;&#3657;&#3635;&#3605;&#3634;&#3621;", "Brown and Black" : "&#3609;&#3657;&#3635;&#3605;&#3634;&#3621;&#3649;&#3621;&#3632;&#3604;&#3635;", "Brown and White" : "&#3609;&#3657;&#3635;&#3605;&#3634;&#3621;&#3649;&#3621;&#3632;&#3586;&#3634;&#3623;", "Browse sheltermanager.com" : "", "Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "", "Brussels Griffon" : "", "Budgie/Budgerigar" : "", "Bulk Complete Diary" : "", "Bulk Complete Medical Records" : "", "Bulk Complete Vaccinations" : "", "Bulk Complete Waiting List" : "", "Bulk Regimen" : "", "Bulk Test" : "", "Bulk Transport" : "", "Bulk Vaccination" : "", "Bulk change animals" : "", "Bull Terrier" : "", "Bullmastiff" : "", "Bunny Rabbit" : "", "Burmese" : "", "Burmilla" : "", "By" : "", "CC" : "", "CSV of animal/adopter data" : "", "CSV of animal/medical data" : "", "CSV of incident data" : "", "CSV of license data" : "", "CSV of payment data" : "", "CSV of person data" : "", "Caique" : "", "Cairn Terrier" : "", "Calendar View" : "", "Calendar view" : "", "Calico" : "", "Californian" : "", "Call" : "", "Call Date/Time" : "", "Caller" : "", "Caller Name" : "", "Caller Phone" : "", "Camel" : "", "Can Login" : "&#3648;&#3586;&#3657;&#3634;&#3626;&#3641;&#3656;&#3619;&#3632;&#3610;&#3610;", "Can afford donation?" : "", "Can't reserve an animal that has an active movement." 
: "", "Canaan Dog" : "", "Canadian Hairless" : "", "Canary" : "", "Cancel" : "&#3618;&#3585;&#3648;&#3621;&#3636;&#3585;", "Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "", "Cancel unadopted reservations after" : "", "Cancel unadopted reservations after this many days, or 0 to never cancel" : "", "Cancelled" : "", "Cancelled Reservation" : "", "Cane Corso Mastiff" : "", "Carolina Dog" : "", "Cash" : "", "Cat" : "&#3649;&#3617;&#3623;", "Catahoula Leopard Dog" : "", "Category" : "&#3627;&#3617;&#3623;&#3604;&#3627;&#3617;&#3641;&#3656;", "Cats" : "&#3649;&#3617;&#3623;", "Cattery" : "", "Cattle Dog" : "", "Cavalier King Charles Spaniel" : "", "Cell" : "", "Cell Phone" : "", "Champagne D'Argent" : "", "Change" : "", "Change Accounts" : "", "Change Animals" : "", "Change Citations" : "", "Change Clinic Apointment" : "", "Change Cost" : "", "Change Date Required" : "", "Change Diets" : "", "Change Found Animal" : "", "Change Incidents" : "", "Change Investigation" : "", "Change Licenses" : "", "Change Litter" : "", "Change Log" : "", "Change Lost Animal" : "", "Change Media" : "", "Change Medical Records" : "", "Change Movement" : "", "Change Password" : "", "Change Payments" : "", "Change Person" : "", "Change Publishing Options" : "", "Change Report" : "", "Change Rota" : "", "Change Stock" : "", "Change System Options" : "", "Change Tests" : "", "Change Transactions" : "", "Change Transport" : "", "Change Trap Loans" : "", "Change User Settings" : "", "Change Vaccinations" : "", "Change Vouchers" : "", "Change Waiting List" : "", "Change date required on selected treatments" : "", "Changed Mind" : "", "Chart" : "", "Chart (Bar)" : "", "Chart (Line)" : "", "Chart (Pie)" : "", "Chart (Point)" : "", "Chart (Steps)" : "", "Chartreux" : "", "Check" : "", "Check License" : "", "Check No" : "", "Checkbox" : "", "Checked By" : "", "Checkered Giant" : "", "Cheque" : "", "Chesapeake Bay Retriever" : "", "Chicken" : "", "Chihuahua" : "", "Children" : "", "Chinchilla" : "", "Chinese Crested Dog" : "", "Chinese Foo Dog" : "", "Chlamydophila" : "", "Chocolate" : "", "Chocolate Labrador Retriever" : "", "Chocolate Tortie" : "", "Chow Chow" : "", "Cinnamon" : "", "Cinnamon Tortoiseshell" : "", "Citation Type" : "", "Citation Types" : "", "Citations" : "", "City" : "", "City contains" : "", "Class" : "", "Clear" : "&#3648;&#3588;&#3621;&#3637;&#3618;&#3619;&#3660;", "Clear and sign again" : "", "Clear tables before importing" : "", "Clinic" : "", "Clinic Calendar" : "", "Clinic Invoice - {0}" : "", "Clinic Statuses" : "", "Clone" : "&#3611;&#3636;&#3604;", "Clone Animals" : "", "Clone Rota" : "", "Clone the rota this week to another week" : "", "Cloning..." 
: "&#3585;&#3635;&#3621;&#3633;&#3591;&#3592;&#3633;&#3604;&#3648;&#3619;&#3637;&#3618;&#3591;...", "Close" : "&#3611;&#3636;&#3604;", "Clumber Spaniel" : "", "Clydesdale" : "", "Coat" : "&#3649;&#3617;&#3623;", "Coat Type" : "", "Coat Types" : "", "Cockapoo" : "", "Cockatiel" : "", "Cockatoo" : "", "Cocker Spaniel" : "", "Code" : "&#3650;&#3588;&#3657;&#3604;", "Code contains" : "", "Code format tokens:" : "", "Collie" : "", "Color" : "", "Color to use when publishing to third party services and adoption sites" : "", "Colors" : "", "Columns" : "", "Columns displayed" : "", "Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "", "Comments" : "&#3588;&#3629;&#3617;&#3648;&#3617;&#3657;&#3609;&#3605;&#3660;", "Comments Contain" : "", "Comments contain" : "", "Comments copied to web preferred media." : "", "Complaint" : "", "Complete" : "&#3648;&#3626;&#3619;&#3655;&#3592;&#3649;&#3621;&#3657;&#3623;", "Complete Tasks" : "", "Completed" : "&#3648;&#3626;&#3619;&#3655;&#3592;&#3649;&#3621;&#3657;&#3623;", "Completed Between" : "", "Completed Type" : "", "Completed notes upto today" : "", "Completion Date" : "", "Completion Type" : "", "Configuration" : "", "Confirm" : "", "Confirm Password" : "", "Confirmation message" : "", "Confirmed" : "", "Consulting Room" : "", "Consulting Room - {0}" : "", "Consumed" : "", "Contact" : "&#3605;&#3636;&#3604;&#3605;&#3656;&#3629;", "Contact Contains" : "", "Conure" : "", "Convert this reservation to an adoption" : "", "Coonhound" : "", "Copy animal comments to the notes field of the web preferred media for this animal" : "", "Copy from animal comments" : "", "Copy of {0}" : "&#3626;&#3635;&#3648;&#3609;&#3634;&#3586;&#3629;&#3591; {0}", "Corded" : "", "Corgi" : "", "Cornish Rex" : "", "Cost" : "&#3649;&#3617;&#3623;", "Cost For" : "", "Cost Type" : "", "Cost Types" : "", "Cost date must be a valid date" : "", "Cost record" : "", "Costs" : "&#3649;&#3617;&#3623;", "Costs need a date and amount." : "", "Coton de Tulear" : "", "Could not find animal with name '{0}'" : "", "Country" : "&#3611;&#3619;&#3632;&#3648;&#3607;&#3624;:", "Courtesy Listing" : "", "Cow" : "", "Cream" : "&#3588;&#3619;&#3637;&#3617;", "Create" : "", "Create Animal" : "", "Create Log" : "", "Create Payment" : "&#3626;&#3619;&#3657;&#3634;&#3591;&#3650;&#3604;&#3618;", "Create Waiting List" : "", "Create a cost record" : "", "Create a due or received payment record from this appointment" : "", "Create a new animal by copying this one" : "", "Create a new animal from this found animal record" : "", "Create a new animal from this incident" : "", "Create a new animal from this waiting list entry" : "", "Create a new document" : "", "Create a new template" : "", "Create a new template by copying the selected template" : "", "Create a new waiting list entry from this found animal record" : "", "Create and edit" : "", "Create boarding cost record when animal is adopted" : "", "Create diary notes from a task" : "", "Create missing lookup values" : "", "Create note this many days from today, or 9999 to ask" : "", "Create this message" : "", "Create this person" : "", "Created By" : "&#3626;&#3619;&#3657;&#3634;&#3591;&#3650;&#3604;&#3618;", "Creating cost and cost types creates matching accounts and transactions" : "", "Creating payments and payments types creates matching accounts and transactions" : "", "Creating..." 
: "", "Credit Card" : "", "Creme D'Argent" : "", "Criteria:" : "", "Crossbreed" : "", "Cruelty Case" : "", "Culling" : "", "Curly" : "", "Current" : "", "Current Vet" : "", "Cymric" : "", "D (Dog)" : "", "DD = current day" : "", "DDL dump (DB2)" : "", "DDL dump (MySQL)" : "", "DDL dump (PostgreSQL)" : "", "DHLPP" : "", "DO NOT use this field to store notes about what the person is looking for." : "", "DOA {0}" : "", "DOB" : "", "Dachshund" : "", "Daily Boarding Cost" : "", "Dalmatian" : "", "Dandi Dinmont Terrier" : "", "Data" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;", "Data Protection" : "", "Database" : "&#3600;&#3634;&#3609;&#3586;&#3657;&#3629;&#3617;&#3641;&#3621;", "Date" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;", "Date '{0}' is not valid." : "", "Date Brought In" : "", "Date Found" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;&#3614;&#3610;", "Date Lost" : "", "Date Of Birth" : "", "Date Put On" : "", "Date Removed" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;&#3648;&#3629;&#3634;&#3629;&#3629;&#3585;", "Date Reported" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;&#3648;&#3629;&#3634;&#3629;&#3629;&#3585;", "Date and notes are mandatory." : "", "Date brought in cannot be blank" : "", "Date brought in cannot be in the future." : "", "Date brought in is not valid" : "", "Date found cannot be blank" : "", "Date found cannot be blank." : "", "Date lost cannot be blank" : "", "Date lost cannot be blank." : "", "Date of Birth" : "", "Date of birth cannot be blank" : "", "Date of birth cannot be in the future." : "", "Date of birth is not valid" : "", "Date of last owner contact" : "", "Date put on" : "", "Date put on cannot be blank" : "", "Date put on list" : "", "Date removed" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;&#3648;&#3629;&#3634;&#3629;&#3629;&#3585;", "Date reported cannot be blank" : "", "Date reported cannot be blank." 
: "", "Date/Time" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;/&#3648;&#3623;&#3621;&#3634;", "Day" : "", "Day Pivot" : "", "Days On Shelter" : "", "Dead On Arrival" : "", "Dead animal" : "", "Dead on arrival" : "", "Death" : "", "Death Comments" : "", "Death Reason" : "&#3648;&#3627;&#3605;&#3640;&#3612;&#3621;&#3607;&#3637;&#3656;&#3648;&#3626;&#3637;&#3618;&#3594;&#3637;&#3623;&#3636;&#3605;", "Death Reasons" : "&#3648;&#3627;&#3605;&#3640;&#3612;&#3621;&#3607;&#3637;&#3656;&#3648;&#3626;&#3637;&#3618;&#3594;&#3637;&#3623;&#3636;&#3605;", "Debit Card" : "", "Dec" : "&#3608;.&#3588;.", "Deceased" : "", "Deceased Date" : "", "December" : "&#3608;&#3633;&#3609;&#3623;&#3634;&#3588;&#3617;", "Declawed" : "", "Declined" : "", "Default Breed" : "", "Default Brought In By" : "", "Default Coat Type" : "", "Default Color" : "", "Default Cost" : "", "Default Death Reason" : "", "Default Diary Person" : "", "Default Entry Reason" : "", "Default Incident Type" : "", "Default Location" : "", "Default Log Filter" : "", "Default Log Type" : "", "Default Payment Method" : "", "Default Payment Type" : "", "Default Reservation Status" : "", "Default Return Reason" : "", "Default Rota Shift" : "", "Default Size" : "", "Default Species" : "", "Default Test Type" : "", "Default Type" : "", "Default Vaccination Type" : "", "Default Value" : "", "Default daily boarding cost" : "", "Default destination account for payments" : "", "Default image for documents" : "", "Default image for this record and the web" : "", "Default source account for costs" : "", "Default to advanced find animal screen" : "", "Default to advanced find person screen" : "", "Default transaction view" : "", "Default urgency" : "", "Default video for publishing" : "", "Default view" : "", "Defaults" : "&#3588;&#3656;&#3634;&#3611;&#3619;&#3636;&#3618;&#3634;&#3618;", "Defaults formats for code and shortcode are TYYYYNNN and NNT" : "", "Delete" : "&#3621;&#3610;", "Delete Accounts" : "", "Delete Animals" : "&#3621;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;", "Delete Citations" : "", "Delete Clinic Appointment" : "", "Delete Cost" : "", "Delete Diary" : "", "Delete Diets" : "", "Delete Document from Repository" : "", "Delete Found Animal" : "", "Delete Incidents" : "", "Delete Incoming Forms" : "", "Delete Investigation" : "", "Delete Licenses" : "", "Delete Litter" : "", "Delete Log" : "", "Delete Lost Animal" : "", "Delete Media" : "", "Delete Medical Records" : "", "Delete Movement" : "", "Delete Payments" : "", "Delete Person" : "", "Delete Regimen" : "", "Delete Report" : "", "Delete Rota" : "", "Delete Stock" : "", "Delete Tests" : "", "Delete Transport" : "", "Delete Trap Loans" : "", "Delete Treatments" : "", "Delete Vaccinations" : "", "Delete Vouchers" : "", "Delete Waiting List" : "", "Delete all rota entries for this week" : "", "Delete this animal" : "", "Delete this incident" : "", "Delete this person" : "", "Delete this record" : "", "Delete this waiting list entry" : "", "Denied" : "", "Deposit" : "", "Deposit Account" : "", "Deposit Returned" : "", "Description" : "&#3588;&#3635;&#3629;&#3608;&#3636;&#3610;&#3634;&#3618;", "Description Contains" : "", "Description cannot be blank" : "", "Deselect" : "", "Details" : "&#3619;&#3634;&#3618;&#3621;&#3632;&#3648;&#3629;&#3637;&#3618;&#3604;", "Devon Rex" : "", "Dialog title" : "", "Diary" : "", "Diary Task" : "", "Diary Task: {0}" : "", "Diary Tasks" : "", "Diary and Messages" : "", "Diary calendar" : "", "Diary date cannot be blank" : "", "Diary date is not valid" : "", "Diary 
for {0}" : "", "Diary note cannot be blank" : "", "Diary note {0} marked completed" : "", "Diary note {0} rediarised for {1}" : "", "Diary notes for: {0}" : "", "Diary notes need a date and subject." : "", "Diary subject cannot be blank" : "", "Diary task items need a pivot, subject and note." : "", "Diary tasks need a name." : "", "Did not ask" : "", "Did you know?" : "", "Died" : "", "Died off shelter" : "", "Died {0}" : "", "Diet" : "", "Diets" : "", "Diets need a start date." : "", "Dispatch" : "", "Dispatch Address" : "", "Dispatch Between" : "", "Dispatch Date/Time" : "", "Dispatch {0}: {1}" : "", "Dispatched ACO" : "", "Display" : "", "Display Index" : "", "Display a search button at the right side of the search box" : "", "Distemper" : "", "Do Not Publish" : "&#3648;&#3612;&#3618;&#3649;&#3614;&#3619;&#3656;", "Do Not Register Microchip" : "", "Do not show" : "", "Doberman Pinscher" : "", "Document" : "", "Document Link" : "", "Document Repository" : "", "Document Templates" : "", "Document file" : "", "Document signed" : "", "Document signing request" : "", "Document templates" : "", "Documents" : "", "Dog" : "&#3626;&#3640;&#3609;&#3633;&#3586;", "Dogo Argentino" : "", "Dogs" : "&#3626;&#3640;&#3609;&#3633;&#3586;", "Dogue de Bordeaux" : "", "Domestic Long Hair" : "", "Domestic Medium Hair" : "", "Domestic Short Hair" : "", "Don't create a cost record" : "", "Don't scale" : "", "Donated" : "", "Donation" : "&#3610;&#3619;&#3636;&#3592;&#3634;&#3588;", "Donation?" : "&#3610;&#3619;&#3636;&#3592;&#3634;&#3588;&#3627;&#3619;&#3639;&#3629;&#3648;&#3611;&#3621;&#3656;&#3634;?", "Donations for animals entering the shelter" : "", "Done" : "&#3652;&#3617;&#3656;&#3617;&#3637;", "Donkey" : "", "Donkey/Mule" : "", "Donor" : "", "Dosage" : "", "Dove" : "&#3652;&#3617;&#3656;&#3617;&#3637;", "Download" : "", "Draft" : "", "Driver" : "", "Drop files here..." 
: "", "Dropoff" : "", "Duck" : "&#3648;&#3611;&#3655;&#3604;", "Due" : "", "Due in next month" : "", "Due in next week" : "", "Due in next year" : "", "Due today" : "", "Duration" : "&#3594;&#3656;&#3623;&#3591;&#3619;&#3632;&#3618;&#3632;", "Dutch" : "", "Dutch Shepherd" : "", "Dwarf" : "", "Dwarf Eared" : "", "E = first letter of animal entry category" : "", "EE = first and second letter of animal entry category" : "", "Eclectus" : "", "Edit" : "&#3649;&#3585;&#3657;&#3652;&#3586;", "Edit All Diary Notes" : "", "Edit Appointment" : "", "Edit Diary Tasks" : "", "Edit HTML publishing templates" : "", "Edit Header/Footer" : "", "Edit Invoice Item" : "", "Edit Lookups" : "", "Edit My Diary Notes" : "", "Edit Online Forms" : "", "Edit Reports" : "", "Edit Roles" : "", "Edit Users" : "", "Edit account" : "", "Edit additional field" : "", "Edit citation" : "", "Edit cost" : "", "Edit diary" : "", "Edit diary notes" : "", "Edit diary task" : "", "Edit diary tasks" : "", "Edit diet" : "", "Edit document" : "", "Edit form field" : "", "Edit investigation" : "", "Edit invoice" : "&#3649;&#3585;&#3657;&#3652;&#3586;&#3648;&#3592;&#3657;&#3634;&#3586;&#3629;&#3591;", "Edit license" : "", "Edit litter" : "", "Edit litters" : "", "Edit log" : "", "Edit media notes" : "", "Edit medical profile" : "", "Edit medical regimen" : "", "Edit movement" : "", "Edit my diary notes" : "", "Edit my diary notes" : "", "Edit notes" : "", "Edit online form" : "", "Edit online form HTML header/footer" : "", "Edit payment" : "", "Edit report" : "", "Edit report template HTML header/footer" : "", "Edit role" : "", "Edit roles" : "", "Edit rota item" : "", "Edit stock" : "", "Edit system users" : "", "Edit template" : "", "Edit test" : "", "Edit the current waiting list" : "", "Edit transaction" : "", "Edit transport" : "", "Edit trap loan" : "", "Edit user" : "", "Edit vaccination" : "", "Edit voucher" : "", "Edit {0}" : "", "Egyptian Mau" : "", "Electricity Bills" : "", "Email" : "&#3629;&#3637;&#3648;&#3617;&#3621;&#3660;", "Email Address" : "", "Email PDF" : "", "Email Person" : "", "Email To" : "", "Email a copy of the selected HTML documents as PDFs" : "", "Email a copy of the selected media files" : "", "Email address" : "", "Email document for electronic signature" : "", "Email incident notes to ACO" : "", "Email incoming form submissions to this comma separated list of email addresses" : "", "Email media" : "", "Email person" : "", "Email signature" : "", "Email submissions to" : "", "Email this message to all matching users" : "", "Email this person" : "", "Email users their diary notes each day" : "", "Emu" : "&#3629;&#3637;&#3617;&#3641;", "Enable FTP uploading" : "", "Enable accounts functionality" : "", "Enable location filters" : "", "Enable lost and found functionality" : "", "Enable multiple sites" : "", "Enable the waiting list functionality" : "", "Enable visual effects" : "", "Enabled" : "", "End Of Day" : "", "End Time" : "", "End at" : "", "End of month" : "", "End of year" : "", "Ends" : "", "Ends after" : "", "English Bulldog" : "", "English Cocker Spaniel" : "", "English Coonhound" : "", "English Lop" : "", "English Pointer" : "", "English Setter" : "", "English Shepherd" : "", "English Spot" : "", "English Springer Spaniel" : "", "English Toy Spaniel" : "", "Entered (newest first)" : "", "Entered (oldest first)" : "", "Entered From" : "", "Entered To" : "", "Entered shelter" : "", "Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 
30 days." : "", "Entering 'deceased' in the search box will show you recently deceased animals." : "", "Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "", "Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "", "Entering 'os' in the search box will show you all shelter animals." : "", "Entlebucher" : "", "Entry" : "", "Entry Category" : "", "Entry Donation" : "", "Entry Reason" : "", "Entry Reason Category" : "", "Entry Reasons" : "", "Entry reason" : "", "Error contacting server." : "", "Escaped" : "", "Escaped {0}" : "", "Eskimo Dog" : "", "Estimate" : "", "Euthanized" : "", "Euthanized {0}" : "", "Every day" : "", "Exclude animals who are aged under" : "", "Exclude from bulk email" : "", "Exclude new animal photos from publishing" : "", "Exclude this image when publishing" : "", "Execute" : "", "Execute Script" : "", "Execute the SQL in the box below" : "", "Executing Task" : "", "Executing..." : "", "Exotic Shorthair" : "", "Expense" : "", "Expense::" : "", "Expenses::Board" : "", "Expenses::Electricity" : "", "Expenses::Food" : "", "Expenses::Gas" : "", "Expenses::Phone" : "", "Expenses::Postage" : "", "Expenses::Stationary" : "", "Expenses::Water" : "", "Expire in next month" : "", "Expired" : "", "Expired in the last month" : "", "Expired in the last week" : "", "Expires" : "", "Expiry" : "", "Expiry date" : "", "Export" : "", "Export Animals as CSV" : "", "Export Report" : "", "Export Reports as CSV" : "", "Export a CSV file of animal records that ASM can import into another database." : "", "Export this database in various formats" : "", "Exporting the complete database can take some time and generate a very large file, are you sure?" : "", "Extra Images" : "", "Extra images" : "", "Extra-Toes Cat (Hemingway Polydactyl)" : "", "F (Feral Cat)" : "", "FECV/FeCoV" : "", "FIPV" : "", "FIV" : "", "FIV Result" : "", "FIV+" : "", "FIV/L Test Date" : "", "FIV/L Tested" : "", "FLV" : "", "FLV Result" : "", "FLV+" : "", "FTP hostname" : "", "FTP password" : "", "FTP username" : "", "FVRCP" : "", "Facebook" : "", "Failed sending email" : "", "Failed to create payment." : "", "Failed to renew license." : "", "Fawn" : "", "Fawn Tortoiseshell" : "", "FeLV" : "", "Features" : "", "Feb" : "&#3585;.&#3614;.", "February" : "&#3585;&#3640;&#3617;&#3616;&#3634;&#3614;&#3633;&#3609;&#3608;&#3660;", "Fee" : "", "Female" : "&#3648;&#3614;&#3624;&#3648;&#3617;&#3637;&#3618;", "Feral" : "", "Ferret" : "", "Field Spaniel" : "", "Field names should not contain spaces." 
: "", "Fila Brasileiro" : "", "File" : "", "Filter" : "", "Financial" : "", "Finch" : "", "Find Animal" : "&#3588;&#3657;&#3609;&#3627;&#3634;&#3626;&#3633;&#3605;&#3623;&#3660;", "Find Animal/Person" : "", "Find Found Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Find Incident" : "", "Find Lost Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Find Person" : "", "Find a found animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Find a lost animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Find aco" : "", "Find an incident" : "", "Find animal" : "&#3588;&#3657;&#3609;&#3627;&#3634;&#3626;&#3633;&#3605;&#3623;&#3660;", "Find animal columns" : "", "Find animal control incidents returned {0} results." : "", "Find animals matching the looking for criteria of this person" : "", "Find donor" : "", "Find driver" : "", "Find fosterer" : "", "Find found animal returned {0} results." : "", "Find homechecked" : "", "Find homechecker" : "", "Find incident" : "", "Find lost animal returned {0} results." : "", "Find member" : "", "Find person" : "", "Find person columns" : "", "Find retailer" : "", "Find shelter" : "", "Find staff" : "", "Find staff/volunteer" : "", "Find this address on a map" : "", "Find vet" : "", "Find volunteer" : "", "Fine Amount" : "", "Finnish Lapphund" : "", "Finnish Spitz" : "", "First Last" : "", "First Names" : "", "First name(s)" : "", "First offence" : "", "Fish" : "&#3611;&#3621;&#3634;", "Flag" : "", "Flags" : "", "Flat-coated Retriever" : "", "Flemish Giant" : "", "Florida White" : "", "Followup" : "", "Followup Between" : "", "Followup Date/Time" : "", "Footer" : "", "For" : "", "Forbidden" : "", "Forenames" : "", "Forget" : "", "Form URL" : "", "Forms need a name." : "", "Foster" : "", "Foster Book" : "", "Foster Capacity" : "", "Foster Transfer" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;", "Foster an animal" : "", "Foster book" : "", "Foster movements must have a valid foster date." : "", "Foster successfully created." : "", "Fostered" : "", "Fostered Animals" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3626;&#3641;&#3597;&#3627;&#3634;&#3618;", "Fostered to {0} since {1}" : "", "Fosterer" : "", "Fosterer (Active Only)" : "", "Fosterer Medical Report" : "", "Found" : "", "Found Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Found Animal - Additional" : "", "Found Animal - Details" : "", "Found Animal Contact" : "", "Found Animal {0}" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;: {0}", "Found Animal: {0}" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;: {0}", "Found animal - {0} {1} [{2}]" : "", "Found animal entries matching '{0}'." : "", "Found animals must have a contact" : "", "Found animals reported in the last 30 days." : "", "Found from" : "", "Found to" : "", "FoundLost animal entry {0} successfully created." 
: "", "Fox Terrier" : "", "Foxhound" : "", "Fr" : "", "French Bulldog" : "", "French-Lop" : "", "Frequency" : "&#3588;&#3623;&#3634;&#3617;&#3606;&#3637;&#3656;", "Frequently Asked Questions" : "", "Fri" : "", "Friday" : "", "From" : "", "From Fostering" : "", "From Other" : "", "From retailer is only valid on adoption movements." : "", "Future notes" : "", "GDPR Contact Opt-In" : "", "Gaited" : "", "Gas Bills" : "", "Gecko" : "", "General" : "", "Generate" : "", "Generate Documents" : "", "Generate HTML from this SQL" : "", "Generate Report" : "", "Generate a document from this animal" : "", "Generate a document from this incident" : "", "Generate a document from this movement" : "", "Generate a document from this person" : "", "Generate a document from this record" : "", "Generate a javascript database for the search page" : "", "Generate a new animal code" : "", "Generate a random name for this animal" : "", "Generate document from this appointment" : "", "Generate document from this license" : "", "Generate document from this payment" : "", "Generate document from this transport" : "", "Generate documentation" : "", "Generate documents" : "", "Generate image thumbnails as tn_$$IMAGE$$" : "", "Generated document '{0}'" : "", "Gerbil" : "", "German Pinscher" : "", "German Shepherd Dog" : "", "German Shorthaired Pointer" : "", "German Wirehaired Pointer" : "", "Get more reports from sheltermanager.com" : "", "Gift Aid" : "", "GiftAid" : "", "Giftaid" : "", "Ginger" : "", "Ginger and White" : "&#3648;&#3607;&#3634;&#3649;&#3621;&#3632;&#3586;&#3634;&#3623;", "Give" : "", "Give Treatments" : "", "Give Vaccination" : "", "Given" : "", "Glen of Imaal Terrier" : "", "Go" : "", "Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "", "Go the options screen and set your shelter's contact details and other settings." : "", "Go the system users screen and add user accounts for your staff." 
: "", "Goat" : "&#3649;&#3614;&#3632;", "Golden" : "", "Golden Retriever" : "", "Goldfish" : "&#3611;&#3621;&#3634;&#3607;&#3629;&#3591;", "Good With Cats" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3649;&#3617;&#3623;", "Good With Children" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3648;&#3604;&#3655;&#3585;", "Good With Dogs" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3626;&#3640;&#3609;&#3633;&#3586;", "Good with Cats" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3649;&#3617;&#3623;", "Good with Children" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3648;&#3604;&#3655;&#3585;", "Good with Dogs" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3626;&#3640;&#3609;&#3633;&#3586;", "Good with cats" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3649;&#3617;&#3623;", "Good with children" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3648;&#3604;&#3655;&#3585;", "Good with dogs" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3626;&#3640;&#3609;&#3633;&#3586;", "Good with kids" : "&#3648;&#3611;&#3655;&#3609;&#3617;&#3636;&#3605;&#3619;&#3605;&#3656;&#3629;&#3649;&#3617;&#3623;", "Google+" : "", "Goose" : "", "Gordon Setter" : "", "Grade" : "", "Great Dane" : "", "Great Pyrenees" : "", "Greater Swiss Mountain Dog" : "", "Green" : "&#3648;&#3586;&#3637;&#3618;&#3623;", "Grey" : "&#3648;&#3607;&#3634;", "Grey and White" : "&#3648;&#3607;&#3634;&#3649;&#3621;&#3632;&#3586;&#3634;&#3623;", "Greyhound" : "", "Guinea Pig" : "", "Guinea fowl" : "", "HMRC Gift Aid Spreadsheet" : "", "HTML" : "", "HTML Publishing Templates" : "", "HTML/FTP Publisher" : "", "Hairless" : "", "Half-Yearly" : "&#3619;&#3634;&#3618;&#3588;&#3619;&#3638;&#3656;&#3591;&#3611;&#3637;", "Hamster" : "", "Harlequin" : "", "Havana" : "", "Havanese" : "", "Header" : "", "Health Problems" : "", "Health and Identification" : "", "Healthy" : "&#3626;&#3617;&#3610;&#3641;&#3619;&#3603;&#3660;", "Heartworm" : "", "Heartworm Test Date" : "", "Heartworm Test Result" : "", "Heartworm Tested" : "", "Heartworm+" : "", "Hedgehog" : "", "Held" : "", "Help" : "", "Hepatitis" : "", "Here are some things you should do before you start adding animals and people to your database." : "", "Hidden" : "", "Hidden Comments" : "", "Hidden comments about the animal" : "", "Hide deceased animals from the home page" : "", "High" : "", "Highlight" : "", "Himalayan" : "", "History" : "&#3611;&#3619;&#3632;&#3623;&#3633;&#3605;&#3636;", "Hold" : "", "Hold the animal until this date or blank to hold indefinitely" : "", "Hold until" : "", "Hold until {0}" : "", "Holland Lop" : "", "Home" : "", "Home Phone" : "", "Home page" : "", "Homecheck Areas" : "", "Homecheck Date" : "", "Homecheck History" : "", "Homecheck areas" : "", "Homechecked" : "", "Homechecked By" : "", "Homechecked by" : "", "Homechecker" : "", "Horizontal Pitch" : "", "Horse" : "&#3617;&#3657;&#3634;", "Hotot" : "", "Hound" : "", "Hours" : "", "Housetrained" : "", "Hovawart" : "", "How urgent is it that we take this animal?" : "", "Husky" : "", "I've finished, Don't show me this popup again." 
: "", "IP Restriction" : "", "IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "", "Ibizan Hound" : "", "If the shelter provides initial insurance cover to new adopters, the policy number" : "", "If this form has a populated emailaddress field during submission, send a confirmation email to it" : "", "If this is the web preferred image, web publishers will use these notes as the animal description" : "", "If this person is a fosterer, the maximum number of animals they can care for." : "", "If this person is a member, the date that membership expires." : "", "If this person is a member, their membership number" : "", "If this person is a member, their membership number." : "", "If this stock record is for a drug, the batch number from the container" : "", "If this stock record is for a perishable good, the expiry date on the container" : "", "If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "", "If you don't select any locations, publishers will include animals in all locations." : "", "Iguana" : "", "Illyrian Sheepdog" : "", "Image" : "", "Image file" : "", "Import" : "", "Import a CSV file" : "", "Import a PayPal CSV file" : "", "Import from file" : "", "Important" : "", "In" : "", "In SubTotal" : "", "In the last month" : "", "In the last quarter" : "", "In the last week" : "", "In the last year" : "", "In-Kind Donation" : "&#3610;&#3619;&#3636;&#3592;&#3634;&#3588;", "Inactive" : "", "Inactive - do not include" : "", "Incident" : "", "Incident - Additional" : "", "Incident - Citation" : "", "Incident - Details" : "", "Incident - Dispatch" : "", "Incident - Owner" : "", "Incident Between" : "", "Incident Completed Types" : "", "Incident Date/Time" : "", "Incident Type" : "", "Incident Types" : "", "Incident date cannot be blank" : "", "Incident followup" : "", "Incident {0} successfully created." 
: "", "Incident {0}, {1}: {2}" : "", "Incidents" : "", "Incidents Requiring Followup" : "", "Include CSV header line" : "", "Include Removed" : "", "Include animals in the following locations" : "", "Include animals on trial adoption" : "", "Include animals who don't have a description" : "&#3619;&#3623;&#3617;&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3652;&#3617;&#3656;&#3617;&#3637;&#3616;&#3634;&#3614;&#3606;&#3656;&#3634;&#3618;&#3604;&#3657;&#3623;&#3618;", "Include animals who don't have a picture" : "", "Include cruelty case animals" : "", "Include deceased animals" : "", "Include fostered animals" : "", "Include found" : "", "Include held animals" : "", "Include incomplete medical records when generating document templates" : "", "Include incomplete vaccination and test records when generating document templates" : "", "Include non-shelter animals" : "", "Include off-shelter animals in medical calendar and books" : "", "Include preferred photo" : "", "Include quarantined animals" : "", "Include reserved animals" : "", "Include retailer animals" : "", "Include returned" : "", "Include this image when publishing" : "", "Include unaltered animals" : "&#3619;&#3623;&#3617;&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3652;&#3617;&#3656;&#3617;&#3637;&#3616;&#3634;&#3614;&#3606;&#3656;&#3634;&#3618;&#3604;&#3657;&#3623;&#3618;", "Income" : "", "Income from an on-site shop" : "", "Income::" : "", "Income::Adoption" : "", "Income::Donation" : "", "Income::EntryDonation" : "", "Income::Interest" : "", "Income::OpeningBalances" : "", "Income::Shop" : "", "Income::Sponsorship" : "", "Income::WaitingList" : "", "Incoming" : "", "Incoming Forms" : "", "Incoming donations (misc)" : "", "Incoming forms are online forms that have been completed and submitted by people on the web." : "", "Incomplete incidents" : "", "Incomplete notes upto today" : "", "Index" : "", "Individual/Couple" : "", "Induct a new animal" : "", "Information" : "&#3586;&#3657;&#3629;&#3617;&#3641;&#3621;", "Initials" : "", "Install" : "", "Install the selected reports to your database" : "", "Insurance" : "", "Insurance No" : "", "Intake" : "", "Intakes {0}" : "", "Internal Location" : "", "Internal Locations" : "", "Invalid email address" : "", "Invalid email address '{0}'" : "", "Invalid microchip number length" : "", "Invalid time '{0}', times should be in 00:00 format" : "", "Invalid time, times should be in HH:MM format" : "", "Invalid username or password." : "", "Investigation" : "", "Investigations" : "", "Investigator" : "", "Invoice Only" : "", "Invoice items need a description and amount." : "", "Irish Setter" : "", "Irish Terrier" : "", "Irish Water Spaniel" : "", "Irish Wolfhound" : "", "Is this a permanent foster?" : "", "Is this a trial adoption?" : "", "Issue a new insurance number for this animal/adoption" : "", "Issue date and expiry date must be valid dates." 
: "", "Issued" : "", "Issued in the last month" : "", "Issued in the last week" : "", "Italian Greyhound" : "", "Italian Spinone" : "", "Item" : "", "Jack Russell Terrier" : "", "Jan" : "&#3617;.&#3588;.", "January" : "&#3617;&#3585;&#3619;&#3634;&#3588;&#3617;", "Japanese Bobtail" : "", "Japanese Chin" : "", "Javanese" : "", "Jersey Wooly" : "", "Jindo" : "", "Jul" : "&#3585;.&#3588;.", "July" : "&#3585;&#3619;&#3585;&#3598;&#3634;&#3588;&#3617;", "Jump to diary" : "", "Jump to donations" : "", "Jump to media" : "", "Jump to movements" : "", "Jun" : "&#3617;&#3636;.&#3618;.", "June" : "&#3617;&#3636;&#3606;&#3640;&#3609;&#3634;&#3618;&#3609;", "Jurisdiction" : "&#3594;&#3656;&#3623;&#3591;&#3619;&#3632;&#3618;&#3632;", "Jurisdictions" : "", "Kai Dog" : "", "Kakariki" : "", "Karelian Bear Dog" : "", "Keep table headers visible when scrolling" : "", "Keeshond" : "", "Kennel" : "", "Kerry Blue Terrier" : "", "Kishu" : "", "Kittens (under {0} months)" : "", "Km" : "", "Komondor" : "", "Korat" : "", "Kuvasz" : "", "Kyi Leo" : "", "Label" : "", "Labrador Retriever" : "", "Lakeland Terrier" : "", "Lancashire Heeler" : "", "Large" : "&#3651;&#3627;&#3597;&#3656;", "Last First" : "", "Last Location" : "", "Last Month" : "", "Last Name" : "", "Last Week" : "", "Last changed by {0} on {1}" : "", "Last name" : "", "Last, First" : "", "Latency" : "", "Latency Tester" : "", "Least recently changed" : "", "Leave" : "", "Leave of absence" : "", "Left Margin" : "", "Left shelter" : "", "Leonberger" : "", "Leptospirosis" : "", "Letter" : "", "Lhasa Apso" : "", "Liability" : "", "Licence for {0} successfully renewed {1} - {2}" : "", "License" : "", "License Number" : "", "License Types" : "", "License number '{0}' has already been issued." : "", "License numbers matching '{0}'." : "", "License requires a number" : "", "License requires a person" : "", "License requires issued and expiry dates" : "", "Licenses" : "", "Licensing" : "", "Lifetime" : "", "Light Amber" : "", "Lilac" : "", "Lilac Tortie" : "", "Limited to {0} matches" : "", "Link" : "&#3621;&#3636;&#3591;&#3588;&#3660;", "Link an animal" : "", "Link to an external web resource" : "", "Link to this animal" : "", "Links" : "&#3621;&#3636;&#3591;&#3588;&#3660;", "List" : "", "Litter" : "", "Litter Ref" : "", "Litter Reference" : "", "Littermates" : "", "Litters" : "", "Litters need at least a required date and number." : "", "Live Releases {0}" : "&#3623;&#3633;&#3609;&#3607;&#3637;&#3656;&#3611;&#3621;&#3656;&#3629;&#3618;", "Liver" : "", "Liver and White" : "&#3648;&#3607;&#3634;&#3649;&#3621;&#3632;&#3586;&#3634;&#3623;", "Lizard" : "", "Llama" : "&#3621;&#3634;&#3617;&#3632;", "Loading..." : "", "Loan" : "", "Local" : "&#3607;&#3637;&#3656;&#3605;&#3633;&#3657;&#3591;", "Locale" : "", "Location" : "&#3607;&#3637;&#3656;&#3605;&#3633;&#3657;&#3591;", "Location Filter" : "", "Location and Species" : "", "Location and Type" : "", "Location and Unit" : "", "Locations" : "&#3607;&#3637;&#3656;&#3605;&#3633;&#3657;&#3591;", "Log" : "&#3611;&#3641;&#3617;&#3610;&#3633;&#3609;&#3607;&#3638;&#3585;", "Log Text" : "", "Log Type" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3586;&#3629;&#3591;&#3611;&#3641;&#3617;&#3610;&#3633;&#3609;&#3607;&#3638;&#3585;", "Log Types" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3586;&#3629;&#3591;&#3611;&#3641;&#3617;&#3610;&#3633;&#3609;&#3607;&#3638;&#3585;", "Log date must be a valid date" : "", "Log entries need a date and text." : "", "Log requires a date." : "", "Log requires a person." 
: "", "Log requires an animal." : "", "Log successfully added." : "", "Login" : "&#3648;&#3586;&#3657;&#3634;&#3626;&#3641;&#3656;&#3619;&#3632;&#3610;&#3610;", "Logout" : "&#3629;&#3629;&#3585;&#3592;&#3634;&#3585;&#3619;&#3632;&#3610;&#3610;", "Long" : "&#3618;&#3634;&#3623;", "Long term" : "", "Longest On Shelter" : "", "Looking For" : "", "Looking for" : "", "Lookup" : "", "Lookup (Multiple Select)" : "", "Lookup Values" : "", "Lookup data" : "", "Lookups" : "", "Lop Eared" : "", "Lory/Lorikeet" : "", "Lost" : "&#3649;&#3617;&#3623;", "Lost Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3626;&#3641;&#3597;&#3627;&#3634;&#3618;", "Lost Animal - Additional" : "", "Lost Animal - Details" : "", "Lost Animal Contact" : "", "Lost Animal: {0}" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3626;&#3641;&#3597;&#3627;&#3634;&#3618;: {0}", "Lost and Found" : "", "Lost and found entries must have a contact" : "", "Lost animal - {0} {1} [{2}]" : "", "Lost animal entries matching '{0}'." : "", "Lost animal entry {0} successfully created." : "", "Lost animals must have a contact" : "", "Lost animals reported in the last 30 days." : "", "Lost from" : "", "Lost to" : "", "Lost/Found" : "", "Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "", "Lovebird" : "", "Low" : "", "Lowchen" : "", "Lowest" : "", "M (Miscellaneous)" : "", "MM = current month" : "", "Macaw" : "", "Mail" : "", "Mail Merge" : "", "Mail Merge - {0}" : "", "Maine Coon" : "", "Make this the default image when creating documents" : "", "Make this the default image when viewing this record and publishing to the web" : "", "Make this the default video link when publishing to the web" : "", "Male" : "&#3648;&#3614;&#3624;&#3612;&#3641;&#3657;", "Maltese" : "", "Manchester Terrier" : "", "Mandatory" : "", "Manual" : "", "Manually enter codes (do not generate)" : "", "Manufacturer" : "", "Manx" : "", "Map" : "", "Map of active incidents" : "", "Mar" : "&#3617;&#3637;.&#3588;.", "March" : "&#3617;&#3637;&#3609;&#3634;&#3588;&#3617;", "Maremma Sheepdog" : "", "Mark Deceased" : "", "Mark an animal deceased" : "", "Mark dispatched now" : "", "Mark new animals as not for adoption" : "", "Mark responded now" : "", "Mark selected payments received" : "", "Mark this owner homechecked" : "", "Mark treatments given" : "", "Marketer" : "", "Markings" : "&#3588;&#3635;&#3648;&#3605;&#3639;&#3629;&#3609;", "Markup" : "", "Marriage/Relationship split" : "", "Mastiff" : "", "Match" : "&#3617;&#3637;&#3609;&#3634;&#3588;&#3617;", "Match Lost and Found" : "", "Match against other lost/found animals" : "", "Match lost and found animals" : "", "Match this animal with the lost and found database" : "", "Maternity" : "", "May" : "&#3614;.&#3588;.", "McNab" : "", "Media" : "&#3617;&#3637;&#3648;&#3604;&#3637;&#3618;", "Media Notes" : "", "Media notes contain" : "", "Medical" : "", "Medical Book" : "", "Medical Profiles" : "", "Medical book" : "", "Medical calendar" : "", "Medical profiles" : "", "Medical profiles need a profile name, treatment, dosage and frequencies." : "", "Medical regimens need an animal, name, dosage, a start date and frequencies." 
: "", "Medicate" : "", "Medicate Animal" : "", "Medium" : "&#3585;&#3621;&#3634;&#3591;", "Member" : "&#3626;&#3617;&#3634;&#3594;&#3636;&#3585;", "Membership Expiry" : "", "Membership Number" : "", "Merge" : "", "Merge Person" : "", "Merge another animal into this one" : "", "Merge another person into this one" : "", "Merge bonded animals into a single record" : "", "Merge duplicate records" : "", "Message" : "", "Message Board" : "", "Message from {0}" : "", "Message successfully sent to {0}" : "", "Messages" : "", "Messages successfully sent" : "", "Method" : "", "Microchip" : "", "Microchip Date" : "", "Microchip Number" : "", "Microchip number {0} has already been allocated to another animal." : "", "Microchipped" : "", "Miles" : "", "Mini Rex" : "", "Mini-Lop" : "", "Miniature Pinscher" : "", "Minutes" : "", "Missouri Foxtrotter" : "", "Mixed Breed" : "&#3626;&#3634;&#3618;&#3614;&#3633;&#3609;&#3608;&#3640;&#3660;", "Mo" : "", "Mobile signing pad" : "", "Modify Additional Fields" : "", "Modify Document Templates" : "", "Modify Lookups" : "", "Mon" : "", "Monday" : "", "Money" : "&#3648;&#3591;&#3636;&#3609;", "Month" : "", "Monthly" : "&#3619;&#3634;&#3618;&#3648;&#3604;&#3639;&#3629;&#3609;", "More Info Needed" : "", "More Medications" : "", "More Tests" : "", "More Vaccinations" : "", "More diary notes" : "", "Morgan" : "", "Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "", "Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "", "Most recently changed" : "", "Most relevant" : "", "Mother" : "", "Mountain Cur" : "", "Mountain Dog" : "", "Mouse" : "", "Move" : "&#3652;&#3617;&#3656;&#3617;&#3637;", "Move an animal to a retailer" : "", "Moved to animal record {0}" : "", "Movement" : "", "Movement Date" : "", "Movement Number" : "", "Movement Type" : "", "Movement Types" : "", "Movement dates clash with an existing movement." : "", "Movement numbers must be unique." : "", "Movements" : "", "Movements require an animal" : "", "Movements require an animal." : "", "Moving..." 
: "", "Multi-Lookup" : "", "Multiple Treatments" : "", "Munchkin" : "", "Munsterlander" : "", "Mustang" : "", "My Fosters" : "", "My Incidents" : "", "My Undispatched Incidents" : "", "My diary notes" : "", "My sheltermanager.com account" : "", "Mynah" : "", "N (Non-Shelter Animal)" : "", "NNN or NN = number unique for this type of animal for this year" : "", "Name" : "&#3594;&#3639;&#3656;&#3629;", "Name Contains" : "", "Name and Address" : "", "Name cannot be blank" : "", "Name contains" : "", "Neapolitan Mastiff" : "", "Negative" : "", "Neglect" : "", "Netherland Dwarf" : "", "Neuter/Spay" : "", "Neutered" : "", "Neutered/Spayed Non-Shelter Animals In {0}" : "", "Neutered/Spayed Shelter Animals In {0}" : "", "New" : "&#3651;&#3627;&#3617;&#3656;", "New Account" : "", "New Appointment" : "", "New Citation" : "", "New Cost" : "", "New Diary" : "", "New Diet" : "", "New Document" : "", "New Field" : "", "New Fosterer" : "", "New Guinea Singing Dog" : "", "New Item" : "", "New License" : "", "New Litter" : "", "New Log" : "&#3611;&#3641;&#3617;&#3610;&#3633;&#3609;&#3607;&#3638;&#3585;&#3651;&#3627;&#3617;&#3656;", "New Movement" : "", "New Owner" : "", "New Password" : "", "New Payment" : "", "New Profile" : "", "New Record" : "", "New Regimen" : "", "New Report" : "", "New Role" : "", "New Stock" : "", "New Task" : "", "New Template" : "", "New Test" : "", "New Transport" : "", "New Trap Loan" : "", "New User" : "", "New Vaccination" : "", "New Voucher" : "", "New Waiting List Entry" : "", "New Zealand" : "", "New diary task" : "", "New form field" : "", "New name" : "", "New online form" : "", "New password and confirmation password don't match." : "", "New task detail" : "", "New template" : "", "Newfoundland Dog" : "", "Next" : "&#3586;&#3657;&#3629;&#3588;&#3623;&#3634;&#3617;", "No" : "&#3652;&#3617;&#3656;", "No adjustment" : "", "No data to show on the report." : "", "No data." : "", "No description" : "&#3588;&#3635;&#3629;&#3608;&#3636;&#3610;&#3634;&#3618;", "No longer retained" : "", "No matches found." : "", "No picture" : "", "No publishers are running." : "", "No results found." : "", "No results." : "", "No tasks are running." 
: "", "No view permission for this report" : "", "Noise" : "", "Non-Shelter" : "", "Non-Shelter Animal" : "", "Non-Shelter Animals" : "&#3621;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;", "Non-shelter Animals" : "&#3621;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;", "None" : "&#3652;&#3617;&#3656;&#3617;&#3637;", "Norfolk Terrier" : "", "Normal user" : "", "Norwegian Buhund" : "", "Norwegian Elkhound" : "", "Norwegian Forest Cat" : "", "Norwegian Lundehund" : "", "Norwich Terrier" : "", "Not Arrived" : "", "Not Available For Adoption" : "", "Not Available for Adoption" : "", "Not For Adoption" : "", "Not Microchipped" : "", "Not Reconciled" : "", "Not available for adoption" : "", "Not dispatched" : "", "Not for adoption" : "", "Not for adoption flag set" : "", "Not in chosen publisher location" : "", "Not reconciled" : "", "Note" : "&#3652;&#3617;&#3656;&#3617;&#3637;", "Notes" : "&#3652;&#3617;&#3656;&#3617;&#3637;", "Notes about the death of the animal" : "", "Nov" : "&#3614;.&#3618;.", "Nova Scotia Duck-Tolling Retriever" : "", "November" : "&#3614;&#3620;&#3624;&#3592;&#3636;&#3585;&#3634;&#3618;&#3609;", "Now" : "", "Number" : "", "Number in litter" : "", "Number of Tasks" : "", "Number of animal links to show" : "", "Number of fields" : "", "Number of pets" : "", "Ocicat" : "", "Oct" : "&#3605;.&#3588;.", "October" : "&#3605;&#3640;&#3621;&#3634;&#3588;&#3617;", "Office" : "", "Old English Sheepdog" : "", "Old Password" : "", "Omit criteria" : "", "Omit header/footer" : "", "On Foster (in figures)" : "", "On Shelter" : "", "On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "", "On shelter for {0} days. Total cost: {1}" : "", "Once assigned, codes cannot be changed" : "", "Once signed, this document cannot be edited or tampered with." : "", "One Off" : "", "One-Off" : "", "Online Form: {0}" : "", "Online Forms" : "", "Online form fields need a name and label." : "", "Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "", "Only PDF, HTML and JPG image files can be attached." 
: "", "Only active accounts" : "", "Only allow users with one of these roles to view this incident" : "", "Only show account totals for the current period, which starts on " : "", "Only show declawed" : "", "Only show pickups" : "", "Only show special needs" : "", "Only show transfers" : "", "Open Incidents" : "", "Open records in a new browser tab" : "", "Open reports in a new browser tab" : "", "Opening balances" : "", "Optional, the date the vaccination \"wears off\" and needs to be administered again" : "", "Options" : "", "Or move this diary on to" : "", "Order published animals by" : "", "Organisation" : "", "Organization" : "", "Organization name" : "", "Oriental Long Hair" : "", "Oriental Short Hair" : "", "Oriental Tabby" : "", "Original Owner" : "", "Ostrich" : "", "Other Account" : "", "Other Organisation" : "", "Other Shelter" : "", "Otterhound" : "", "Our shelter does trial adoptions, allow us to mark these on movement screens" : "", "Out" : "", "Out Between" : "", "Out SubTotal" : "", "Output a deceased animals page" : "", "Output a page with links to available online forms" : "", "Output a separate page for each animal type" : "", "Output a separate page for each species" : "", "Output an adopted animals page" : "", "Output an rss.xml page" : "", "Overdue" : "", "Overdue medical items" : "", "Overtime" : "", "Owl" : "", "Owner" : "&#3648;&#3592;&#3657;&#3634;&#3586;&#3629;&#3591;", "Owner Vet" : "", "Owner given citation" : "", "Owners Vet" : "", "PM" : "", "Page extension" : "", "Paid" : "", "Paint/Pinto" : "", "Palomino" : "", "Paper Size" : "", "Papillon" : "", "Parainfluenza" : "", "Parakeet (Other)" : "", "Parent" : "", "Parrot (Other)" : "", "Parrotlet" : "", "Parvovirus" : "", "Paso Fino" : "", "Pass Homecheck" : "", "Password" : "", "Password for '{0}' has been reset." : "", "Password is incorrect." : "", "Password successfully changed." : "", "Passwords cannot be blank." : "", "Path" : "", "Patterdale Terrier (Fell Terrier)" : "", "PayPal" : "", "Payment" : "", "Payment Book" : "", "Payment From" : "", "Payment Methods" : "", "Payment Type" : "", "Payment Types" : "", "Payment book" : "", "Payment calendar" : "", "Payment of {0} successfully received ({1})." : "", "Payments" : "", "Payments need at least one date, an amount and a person." : "", "Payments of type" : "", "Payments require a person" : "", "Payments require a received date" : "", "Peacock/Pea fowl" : "", "Pekingese" : "", "Pending Adoption" : "", "Pending Apartment Verification" : "", "Pending Home Visit" : "", "Pending Vet Check" : "", "Pension" : "", "People" : "", "People Looking For" : "", "People matching '{0}'." : "", "People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "", "People with active reservations, but no homecheck has been done." : "", "People with overdue donations." : "", "Percheron" : "", "Perform" : "", "Perform Homecheck" : "", "Perform Test" : "", "Performed" : "", "Permanent Foster" : "", "Persian" : "", "Person" : "", "Person - Additional" : "", "Person - Name and Address" : "", "Person - Type" : "", "Person Flags" : "", "Person looking for report" : "", "Person successfully created" : "", "Personal" : "", "Peruvian Inca Orchid" : "", "Peruvian Paso" : "", "Petit Basset Griffon Vendeen" : "", "Pharaoh Hound" : "", "Pheasant" : "", "Phone" : "&#3650;&#3607;&#3619;", "Phone contains" : "", "Photo successfully uploaded." 
: "", "Picked Up" : "", "Picked Up By" : "", "Pickup" : "", "Pickup Address" : "", "Pickup Location" : "", "Pickup Locations" : "", "Pig" : "&#3627;&#3617;&#3641;", "Pig (Farm)" : "&#3627;&#3617;&#3641; (&#3615;&#3634;&#3619;&#3660;&#3617;)", "Pigeon" : "&#3609;&#3585;&#3614;&#3636;&#3619;&#3634;&#3610;", "Pinterest" : "", "Pionus" : "", "Pit Bull Terrier" : "", "Pixie-Bob" : "", "Please click the Sign button when you are finished." : "", "Please see the manual for more information." : "", "Please select a PDF, HTML or JPG image file to attach" : "", "Please tighten the scope of your email campaign to {0} emails or less." : "", "Please use the links below to electronically sign these documents." : "", "Plott Hound" : "", "Poicephalus/Senegal" : "", "Pointer" : "", "Points for being found within 2 weeks of being lost" : "", "Points for matching age group" : "", "Points for matching breed" : "", "Points for matching color" : "", "Points for matching features" : "", "Points for matching lost/found area" : "", "Points for matching sex" : "", "Points for matching species" : "", "Points for matching zipcode" : "", "Points required to appear on match report" : "", "Polish" : "", "Polish Lowland Sheepdog" : "", "Pomeranian" : "", "Pony" : "", "Poodle" : "", "Portugese Podengo" : "", "Portuguese Water Dog" : "", "Positive" : "", "Positive for Heartworm, FIV or FLV" : "", "Positive/Negative" : "", "Post" : "&#3619;&#3627;&#3633;&#3626;&#3652;&#3611;&#3619;&#3625;&#3603;&#3637;&#3618;&#3660;", "Postage costs" : "", "Pot Bellied" : "", "Prairie Dog" : "", "Prefill new media notes for animal images with animal comments if left blank" : "", "Prefill new media notes with the filename if left blank" : "", "Premises" : "", "Presa Canario" : "", "Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "", "Preview" : "", "Previous" : "", "Previous Adopter" : "", "Print" : "&#3614;&#3636;&#3617;&#3614;&#3660;", "Print Preview" : "", "Print selected forms" : "", "Printable Manual" : "", "Printing word processor documents uses hidden iframe and window.print" : "", "Priority" : "", "Priority Floor" : "", "Produce a CSV File" : "", "Produce a PDF of printable labels" : "", "Profile" : "", "Profile name cannot be blank" : "", "Public Holiday" : "", "Publish Animals to the Internet" : "", "Publish HTML via FTP" : "", "Publish now" : "", "Publish to folder" : "", "Published to Website" : "", "Publisher" : "", "Publisher Breed" : "", "Publisher Color" : "", "Publisher Logs" : "", "Publisher Species" : "", "Publishing" : "", "Publishing History" : "", "Publishing Logs" : "", "Publishing Options" : "", "Publishing complete." : "", "Publishing template" : "", "Pug" : "", "Puli" : "", "Pumi" : "", "Puppies (under {0} months)" : "", "Purchased" : "", "Qty" : "", "Quaker Parakeet" : "", "Quantity" : "", "Quarantine" : "", "Quarterhorse" : "", "Quarterly" : "&#3619;&#3634;&#3618;&#3652;&#3605;&#3619;&#3617;&#3634;&#3626;&#3605;&#3660;", "Quick Links" : "", "Quicklinks" : "", "Quicklinks are shown on the home page and allow quick access to areas of the system." : "", "R" : "", "Rabbit" : "&#3585;&#3619;&#3632;&#3605;&#3656;&#3634;&#3618;", "Rabies" : "", "Rabies Tag" : "", "RabiesTag" : "", "Radio Buttons" : "", "Ragamuffin" : "", "Ragdoll" : "", "Rank" : "&#3629;&#3633;&#3609;&#3604;&#3633;&#3610;", "Rat" : "", "Rat Terrier" : "", "Raw Markup" : "", "Read the manual for more information about Animal Shelter Manager." 
: "", "Real name" : "", "Reason" : "&#3648;&#3627;&#3605;&#3640;&#3612;&#3621;", "Reason For Appointment" : "", "Reason Not From Owner" : "", "Reason for Entry" : "", "Reason for entry" : "", "Reason not from Owner" : "", "Reason the owner did not bring in the animal themselves" : "", "Recalculate ALL animal ages/times" : "", "Recalculate ALL animal locations" : "", "Recalculate on-shelter animal locations" : "", "Receipt No" : "", "Receipt/Invoice" : "", "Receive" : "", "Receive a donation" : "", "Receive a payment" : "", "Received" : "", "Received in last day" : "", "Received in last month" : "", "Received in last week" : "", "Received in last year" : "", "Received today" : "", "Recently Adopted" : "", "Recently Changed" : "", "Recently Entered Shelter" : "", "Recently Fostered" : "", "Recently deceased" : "", "Recently deceased shelter animals (last 30 days)." : "", "Reception" : "", "Reclaim" : "", "Reclaim an animal" : "", "Reclaim movements must have a valid reclaim date." : "", "Reclaim successfully created." : "", "Reclaimed" : "", "Reconcile" : "", "Reconciled" : "", "Redbone Coonhound" : "", "Rediarised" : "", "Redirect to URL after POST" : "", "Reference" : "", "Refresh" : "", "Regenerate 'Match lost and found animals' report" : "", "Regenerate 'Person looking for' report" : "", "Regenerate annual animal figures for" : "", "Regenerate monthly animal figures for" : "", "Regenerate person names in selected format" : "", "Register Microchip" : "", "Register microchips after" : "", "Released To Wild" : "", "Released To Wild {0}" : "", "Reload" : "", "Remaining" : "", "Remember me on this computer" : "", "Removal" : "&#3648;&#3629;&#3634;&#3629;&#3629;&#3585;", "Removal Reason" : "", "Removal reason" : "", "Remove" : "", "Remove HTML and PDF document media after this many years" : "", "Remove clinic functionality from screens and menus" : "", "Remove fine-grained animal control incident permissions" : "", "Remove holds after" : "", "Remove move menu and the movements tab from animal and person screens" : "", "Remove personally identifiable data" : "", "Remove previously published files before uploading" : "", "Remove retailer functionality from the movement screens and menus" : "", "Remove short shelter code box from the animal details screen" : "", "Remove the FIV/L test fields from animal health details" : "", "Remove the Litter ID field from animal details" : "", "Remove the Rabies Tag field from animal health details" : "", "Remove the adoption coordinator field from animal entry details" : "", "Remove the adoption fee field from animal details" : "", "Remove the animal control functionality from menus and screens" : "", "Remove the bonded with fields from animal entry details" : "", "Remove the city/state fields from person details" : "", "Remove the coat type field from animal details" : "", "Remove the declawed box from animal health details" : "", "Remove the document repository functionality from menus" : "", "Remove the good with fields from animal notes" : "", "Remove the heartworm test fields from animal health details" : "", "Remove the insurance number field from the movement screens" : "", "Remove the location unit field from animal details" : "", "Remove the microchip fields from animal identification details" : "", "Remove the neutered fields from animal health details" : "", "Remove the online form functionality from menus" : "", "Remove the picked up fields from animal entry details" : "", "Remove the rota functionality from menus and screens" : "", "Remove the 
size field from animal details" : "", "Remove the stock control functionality from menus and screens" : "", "Remove the tattoo fields from animal identification details" : "", "Remove the transport functionality from menus and screens" : "", "Remove the trap loan functionality from menus and screens" : "", "Remove the weight field from animal details" : "", "Removed" : "", "Rename" : "", "Renew License" : "", "Renew licence" : "", "Renew license" : "", "Report" : "&#3619;&#3634;&#3618;&#3591;&#3634;&#3609;", "Report Title" : "", "Report a new incident" : "", "Reports" : "&#3619;&#3634;&#3618;&#3591;&#3634;&#3609;", "Request signature by email" : "", "Requested" : "", "Require followup" : "", "Required" : "", "Required date must be a valid date" : "", "Reschedule" : "", "Reservation" : "", "Reservation Book" : "", "Reservation Cancelled" : "", "Reservation Date" : "", "Reservation For" : "", "Reservation Status" : "", "Reservation Statuses" : "", "Reservation book" : "", "Reservation date cannot be after cancellation date." : "", "Reservation successfully created." : "", "Reservations must have a valid reservation date." : "", "Reserve" : "", "Reserve an animal" : "", "Reserved" : "", "Reset" : "", "Reset Password" : "&#3619;&#3637;&#3648;&#3595;&#3655;&#3605;&#3619;&#3627;&#3633;&#3626;&#3612;&#3656;&#3634;&#3609;", "Respond" : "", "Responded" : "", "Responded Between" : "", "Responded Date/Time" : "", "Result" : "", "Results" : "", "Results for '{0}'." : "", "Retailer" : "", "Retailer Animals" : "&#3621;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;", "Retailer Book" : "", "Retailer book" : "", "Retailer movement successfully created." : "", "Retailer movements must have a valid movement date." : "", "Retriever" : "", "Return" : "", "Return Category" : "", "Return Date" : "", "Return a transferred animal" : "", "Return an animal from adoption" : "", "Return an animal from another movement" : "", "Return an animal from transfer" : "", "Return date cannot be before the movement date." : "", "Return this movement and bring the animal back to the shelter" : "", "Returned" : "", "Returned By" : "&#3626;&#3619;&#3657;&#3634;&#3591;&#3650;&#3604;&#3618;", "Returned To Owner" : "", "Returned from" : "", "Returned to" : "", "Returned to Owner {0}" : "", "Returning" : "", "Returns {0}" : "", "Reupload animal images every time" : "", "Rex" : "", "Rhea" : "", "Rhinelander" : "", "Rhodesian Ridgeback" : "", "Ringneck/Psittacula" : "", "Role is in use and cannot be deleted." : "", "Roles" : "", "Roles need a name." : "", "Rosella" : "", "Rostered day off" : "", "Rota" : "", "Rota Types" : "", "Rota cloned successfully." : "", "Rotate image 90 degrees anticlockwis" : "", "Rotate image 90 degrees clockwise" : "", "Rottweiler" : "", "Rough" : "", "Rows" : "", "Ruddy" : "", "Russian Blue" : "", "S (Stray Cat)" : "", "S = first letter of animal species" : "", "SM Account" : "", "SMS" : "", "SQL" : "SQL", "SQL Interface" : "", "SQL dump" : "", "SQL dump (ASM2 HSQLDB Format)" : "", "SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "", "SQL interface" : "", "SQL is syntactically correct." : "", "SS = first and second letter of animal species" : "", "Sa" : "", "Saddlebred" : "", "Saint Bernard St. Bernard" : "", "Sales Tax" : "", "Saluki" : "", "Samoyed" : "", "Sat" : "", "Satin" : "", "Saturday" : "", "Save" : "", "Save and leave" : "", "Save this incident" : "", "Save this person" : "", "Save this record" : "", "Save this waiting list entry" : "", "Saving..." 
: "", "Scale published animal images to" : "", "Scheduled" : "", "Schipperke" : "", "Schnauzer" : "", "Scottish Deerhound" : "", "Scottish Fold" : "", "Scottish Terrier Scottie" : "", "Script" : "", "Seal" : "", "Sealyham Terrier" : "", "Search" : "&#3588;&#3657;&#3609;&#3627;&#3634;", "Search Results for '{0}'" : "", "Search returned {0} results." : "", "Search sort order" : "", "Searchable" : "", "Second offence" : "", "Select" : "&#3648;&#3621;&#3639;&#3629;&#3585;", "Select a person" : "", "Select a person to attach this form to." : "", "Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "", "Select all" : "", "Select an animal" : "&#3648;&#3621;&#3639;&#3629;&#3585;&#3626;&#3633;&#3605;&#3623;&#3660;", "Select an animal to attach this form to." : "", "Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "", "Select animal to merge" : "&#3648;&#3621;&#3639;&#3629;&#3585;&#3626;&#3633;&#3605;&#3623;&#3660;", "Select animals" : "&#3648;&#3621;&#3639;&#3629;&#3585;&#3626;&#3633;&#3605;&#3623;&#3660;", "Select date for diary task" : "", "Select person to merge" : "", "Select recommended" : "", "Selected On-Shelter Animals" : "&#3621;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3626;&#3641;&#3597;&#3627;&#3634;&#3618;", "Selkirk Rex" : "", "Send" : "&#3626;&#3656;&#3591;", "Send Emails" : "", "Send a weekly email to fosterers with medical information about their animals" : "&#3586;&#3657;&#3629;&#3617;&#3641;&#3621;&#3648;&#3614;&#3636;&#3656;&#3617;&#3648;&#3605;&#3636;&#3617;&#3648;&#3585;&#3637;&#3656;&#3618;&#3623;&#3585;&#3633;&#3610;&#3626;&#3633;&#3605;&#3623;&#3660;", "Send confirmation email to form submitter" : "", "Send emails" : "", "Send mass emails and perform mail merges" : "", "Send via email" : "", "Sending {0} emails is considered abusive and will damage the reputation of the email server." : "", "Sending..." : "&#3585;&#3635;&#3621;&#3633;&#3591;&#3592;&#3633;&#3604;&#3648;&#3619;&#3637;&#3618;&#3591;...", "Senior" : "", "Sent to mobile signing pad." : "", "Sep" : "&#3585;.&#3618;.", "Separate waiting list rank by species" : "", "September" : "&#3585;&#3633;&#3609;&#3618;&#3634;&#3618;&#3609;", "Server clock adjustment" : "", "Set publishing options" : "", "Set this to 0 to never automatically remove." : "", "Set to 0 to never update urgencies." : "", "Set wether or not this user account can log in to the user interface." : "", "Setter" : "", "Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "", "Settings" : "&#3585;&#3634;&#3619;&#3605;&#3633;&#3657;&#3591;&#3588;&#3656;&#3634;", "Settings, Lookup data" : "", "Settings, Options" : "", "Settings, Reports" : "", "Settings, System user accounts" : "", "Sex" : "&#3648;&#3614;&#3624;", "Sex and Species" : "&#3648;&#3621;&#3639;&#3629;&#3585;&#3626;&#3611;&#3637;&#3594;&#3637;&#3656;&#3626;&#3660;", "Sexes" : "", "Shar Pei" : "", "Share" : "", "Shared weblink" : "", "Shares" : "", "Sheep" : "&#3649;&#3585;&#3632;", "Sheep Dog" : "", "Shelter" : "", "Shelter Animal" : "", "Shelter Animals" : "", "Shelter Details" : "", "Shelter animal {0} '{1}'" : "", "Shelter animals" : "", "Shelter code cannot be blank" : "", "Shelter code {0} has already been allocated to another animal." 
: "", "Shelter stats (all time)" : "", "Shelter stats (this month)" : "", "Shelter stats (this week)" : "", "Shelter stats (this year)" : "", "Shelter stats (today)" : "", "Shelter view" : "", "Shepherd" : "", "Shetland Sheepdog Sheltie" : "", "Shiba Inu" : "", "Shift" : "", "Shih Tzu" : "", "Short" : "&#3626;&#3633;&#3657;&#3609;", "Show GDPR Contact Opt-In field on person screens" : "", "Show PDF files inline instead of sending them as attachments" : "", "Show a cost field on medical/test/vaccination screens" : "", "Show a minimap of the address on person screens" : "", "Show a separate paid date field with costs" : "", "Show alerts on the home page" : "", "Show animal thumbnails in movement and medical books" : "", "Show animals adopted" : "", "Show codes on the shelter view screen" : "", "Show complete comments in table views" : "", "Show empty locations" : "", "Show on new record screens" : "", "Show quick links on all pages" : "", "Show quick links on the home page" : "", "Show report menu items in collapsed categories" : "", "Show short shelter codes on screens" : "", "Show the adoption fee field" : "", "Show the altered fields" : "", "Show the breed fields" : "", "Show the brought in by field" : "", "Show the color field" : "", "Show the date brought in field" : "", "Show the entry category field" : "", "Show the full diary (instead of just my notes) on the home page" : "", "Show the hold fields" : "", "Show the internal location field" : "", "Show the litter ID field" : "", "Show the location unit field" : "", "Show the microchip fields" : "", "Show the original owner field" : "", "Show the size field" : "", "Show the tattoo fields" : "", "Show the time brought in field" : "", "Show the transfer in field" : "", "Show the weight field" : "", "Show timeline on the home page" : "", "Show tips on the home page" : "", "Show transactions from" : "", "Show weight as lb rather than kg" : "", "Showing {0} timeline events." : "", "Siamese" : "", "Siberian" : "", "Siberian Husky" : "", "Sick leave" : "", "Sick/Injured" : "&#3611;&#3656;&#3623;&#3618;/&#3610;&#3634;&#3604;&#3648;&#3592;&#3655;&#3610;", "Sick/injured animal" : "", "Sign" : "", "Sign document" : "", "Sign on screen" : "", "Signature" : "", "Signed" : "", "Signing" : "", "Signing Pad" : "", "Signup" : "", "Silky Terrier" : "", "Silver" : "", "Silver Fox" : "", "Silver Marten" : "", "Similar Animal" : "", "Similar Person" : "", "Simple" : "&#3629;&#3618;&#3656;&#3634;&#3591;&#3591;&#3656;&#3634;&#3618;", "Singapura" : "", "Single Treatment" : "", "Site" : "&#3586;&#3609;&#3634;&#3604;", "Sites" : "&#3586;&#3609;&#3634;&#3604;", "Size" : "&#3586;&#3609;&#3634;&#3604;", "Sizes" : "&#3586;&#3609;&#3634;&#3604;", "Skunk" : "", "Skye Terrier" : "", "Sloughi" : "", "Small" : "&#3648;&#3621;&#3655;&#3585;", "SmartTag PETID" : "", "Smooth Fox Terrier" : "", "Snake" : "&#3591;&#3641;", "Snowshoe" : "", "Social" : "", "Softbill (Other)" : "", "Sold" : "", "Somali" : "", "Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "", "Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." 
: "", "Some info text" : "", "Sorrel" : "", "Sorrel Tortoiseshell" : "", "Sorry, this document has already been signed" : "", "South Russian Ovcharka" : "", "Spaniel" : "", "Special Needs" : "", "Species" : "&#3626;&#3611;&#3637;&#3594;&#3637;&#3656;&#3626;&#3660;", "Species A-Z" : "", "Species Z-A" : "", "Species to use when publishing to third party services and adoption sites" : "", "Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "", "Sphynx (hairless cat)" : "", "Spitz" : "", "Split baby/adult age at" : "", "Split species pages with a baby/adult prefix" : "", "Sponsorship donations" : "", "Staff" : "", "Staff Rota" : "", "Staff record" : "", "Staff rota" : "", "Staffordshire Bull Terrier" : "", "Standard" : "", "Standardbred" : "", "Start Date" : "", "Start Of Day" : "", "Start Time" : "", "Start at" : "", "Start date" : "", "Start date must be a valid date" : "", "Start of year" : "", "Started" : "", "Starts" : "&#3626;&#3606;&#3634;&#3609;&#3632;", "State" : "&#3626;&#3606;&#3634;&#3609;&#3632;", "State contains" : "", "Stationary costs" : "", "Stats" : "&#3626;&#3606;&#3634;&#3609;&#3632;", "Stats period" : "", "Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "", "Status" : "&#3626;&#3606;&#3634;&#3609;&#3632;", "Status and Species" : "&#3648;&#3621;&#3639;&#3629;&#3585;&#3626;&#3611;&#3637;&#3594;&#3637;&#3656;&#3626;&#3660;", "Stay" : "", "Stock" : "", "Stock Control" : "", "Stock Levels" : "", "Stock Locations" : "", "Stock Take" : "", "Stock Usage Type" : "", "Stock level must have a name" : "", "Stock level must have a unit" : "", "Stock needs a name and unit." : "", "Stocktake" : "", "Stolen" : "", "Stolen {0}" : "", "Stop" : "", "Stop Publishing" : "", "Stores" : "", "Stray" : "", "Su" : "", "SubTotal" : "", "Subject" : "", "Submission received: {0}" : "", "Success" : "", "Successfully attached to {0}" : "", "Sugar Glider" : "", "Sun" : "", "Sunday" : "", "Super user" : "", "Superuser" : "", "Surname" : "&#3609;&#3634;&#3617;&#3626;&#3585;&#3640;&#3621;", "Surrender" : "", "Surrender Pickup" : "", "Suspect" : "", "Suspect 1" : "", "Suspect 2" : "", "Suspect 3" : "", "Suspect/Animal" : "", "Swan" : "", "Swedish Vallhund" : "", "Syntax check this SQL" : "", "System" : "", "System Admin" : "", "System Options" : "", "System user accounts" : "", "T = first letter of animal type" : "", "TNR" : "", "TNR - Trap/Neuter/Release" : "", "TT = first and second letter of animal type" : "", "Tabby" : "", "Tabby and White" : "", "Take another payment" : "", "Taken By" : "", "Tan" : "", "Tan and Black" : "", "Tan and White" : "", "Task complete." 
: "&#3648;&#3626;&#3619;&#3655;&#3592;&#3649;&#3621;&#3657;&#3623;", "Task items are executed in order of index, lowest to highest" : "", "Tattoo" : "", "Tattoo Date" : "", "Tattoo Number" : "", "Tax" : "", "Tax Amount" : "", "Tax Rate %" : "", "Telephone" : "&#3650;&#3607;&#3619;&#3624;&#3633;&#3614;&#3607;&#3660;", "Telephone Bills" : "", "Template" : "", "Template Name" : "", "Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "", "Tennessee Walker" : "", "Terrapin" : "", "Terrier" : "", "Test" : "&#3586;&#3657;&#3629;&#3588;&#3623;&#3634;&#3617;", "Test Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3626;&#3641;&#3597;&#3627;&#3634;&#3618;", "Test Book" : "", "Test Performed" : "", "Test Results" : "", "Test Types" : "", "Test book" : "", "Test marked as performed for {0} - {1}" : "", "Tests" : "&#3586;&#3657;&#3629;&#3588;&#3623;&#3634;&#3617;", "Tests need an animal and at least a required date." : "", "Text" : "&#3586;&#3657;&#3629;&#3588;&#3623;&#3634;&#3617;", "Text Encoding" : "", "Th" : "", "Thai Ridgeback" : "", "Thank you for choosing Animal Shelter Manager for your shelter!" : "", "Thank you, the document is now signed." : "", "That animal is already linked to the incident" : "", "The CSV file should be created by PayPal's \"All Activity\" report." : "", "The SmartTag PETID number" : "", "The SmartTag type" : "", "The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "", "The animal name" : "", "The animal record to merge must be different from the original." : "", "The animal sex" : "", "The base color of this animal" : "", "The coat type of this animal" : "", "The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "", "The database will be inaccessible to all users while the export is in progress." : "", "The date reported to the shelter" : "", "The date the animal died" : "", "The date the animal was FIV/L tested" : "", "The date the animal was adopted" : "", "The date the animal was altered" : "", "The date the animal was born" : "", "The date the animal was brought into the shelter" : "", "The date the animal was heartworm tested" : "", "The date the animal was microchipped" : "", "The date the animal was reclaimed" : "", "The date the animal was tattooed" : "", "The date the foster animal will be returned if known" : "", "The date the foster is effective from" : "", "The date the litter entered the shelter" : "", "The date the owner last contacted the shelter" : "", "The date the payment was received" : "", "The date the reservation is effective from" : "", "The date the retailer movement is effective from" : "", "The date the transfer is effective from" : "", "The date the trial adoption is over" : "", "The date the vaccination is required/due to be administered" : "", "The date the vaccination was administered" : "", "The date this animal was found" : "", "The date this animal was lost" : "", "The date this animal was put on the waiting list" : "", "The date this animal was removed from the waiting list" : "", "The date this animal was reserved" : "", "The date this animal was returned to its owner" : "", "The date this person was homechecked." : "", "The default username is 'user' with the password 'letmein'" : "", "The entry reason for this animal" : "", "The litter this animal belongs to" : "", "The locale determines the language ASM will use when displaying text, dates and currencies." 
: "", "The location where the animal was picked up" : "", "The microchip number" : "", "The movement number '{0}' is not unique." : "", "The number of stock records to create" : "", "The period in days before waiting list urgency is increased" : "", "The person record to merge must be different from the original." : "", "The primary breed of this animal" : "", "The reason the owner wants to part with the animal" : "", "The reason this animal was removed from the waiting list" : "", "The remaining units in the container" : "", "The result of the FIV test" : "", "The result of the FLV test" : "", "The result of the heartworm test" : "", "The retail/resale price per unit" : "", "The secondary breed of this animal" : "", "The selected file is not an image." : "", "The shelter category for this animal" : "", "The shelter reference number" : "", "The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "", "The size of this animal" : "", "The species of this animal" : "", "The tattoo number" : "", "The type of unit in the container, eg: tablet, vial, etc." : "", "The veterinary license number." : "", "The wholesale/trade price the container was bought for" : "", "There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "", "There is not enough information in the form to create a found animal record (need a description and area found)." : "", "There is not enough information in the form to create a lost animal record (need a description and area lost)." : "", "There is not enough information in the form to create a person record (need a surname)." : "", "There is not enough information in the form to create a transport record (need animalname)." : "", "There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "", "There is not enough information in the form to create a waiting list record (need a description)." : "", "There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "", "These are the HTML headers and footers used when displaying online forms." : "", "These are the HTML headers and footers used when generating reports." : "", "These are the default values for these fields when creating new records." : "", "These batch processes are run each night by the system and should not need to be run manually." : "", "These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "", "These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "", "These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "", "These fields determine which columns are shown on the find animal and find person screens." : "", "These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "", "These options change the behaviour of the search box at the top of the page." : "", "These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "", "Third offence" : "", "This Month" : "", "This Week" : "", "This Year" : "", "This animal already has an active reservation." 
: "", "This animal has a SmartTag PETID" : "", "This animal has a tattoo" : "", "This animal has active reservations, they will be cancelled." : "", "This animal has an adoption fee of {0}" : "", "This animal has been FIV/L tested" : "", "This animal has been altered" : "", "This animal has been declawed" : "", "This animal has been heartworm tested" : "", "This animal has movements and cannot be removed." : "", "This animal has not been altered." : "", "This animal has not been microchipped." : "", "This animal has special needs" : "", "This animal has the same name as another animal recently added to the system." : "", "This animal is a crossbreed" : "", "This animal is bonded with {0}" : "", "This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "", "This animal is currently at a retailer, it will be automatically returned first." : "", "This animal is currently fostered, it will be automatically returned first." : "", "This animal is currently held and cannot be adopted." : "", "This animal is currently quarantined and should not leave the shelter." : "", "This animal is marked not for adoption." : "", "This animal is microchipped" : "", "This animal is not on the shelter." : "", "This animal is part of a cruelty case and should not leave the shelter." : "", "This animal should be held in case it is reclaimed" : "", "This animal should not be shown in figures and is not in the custody of the shelter" : "", "This animal was dead on arrival to the shelter" : "", "This animal was euthanized" : "", "This animal was picked up" : "", "This animal was transferred from another shelter" : "", "This code has already been used." : "", "This database is locked and in read-only mode. You cannot add, change or delete records." : "", "This database is locked." : "", "This date of birth is an estimate" : "", "This expense account is the source for costs of this type" : "", "This income account is the source for payments received of this type" : "", "This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "", "This many years after creation of a person record, the name, address and telephone data will be anonymized." : "", "This month" : "", "This movement cannot be from a retailer when the animal has no prior retailer movements." : "", "This person has an animal control incident against them" : "", "This person has an animal control incident against them." : "", "This person has been banned from adopting animals" : "", "This person has been banned from adopting animals." : "", "This person has been under investigation" : "", "This person has been under investigation." : "", "This person has movements and cannot be removed." : "", "This person has not passed a homecheck" : "", "This person has not passed a homecheck." : "", "This person has payments and cannot be removed." : "", "This person has previously surrendered an animal." : "", "This person is linked to a waiting list record and cannot be removed." : "", "This person is linked to an animal and cannot be removed." : "", "This person is linked to an investigation and cannot be removed." : "", "This person is linked to animal control and cannot be removed." : "", "This person is linked to animal licenses and cannot be removed." : "", "This person is linked to animal transportation and cannot be removed." : "", "This person is linked to citations and cannot be removed." : "", "This person is linked to found animals and cannot be removed." 
: "", "This person is linked to lost animals and cannot be removed." : "", "This person is linked to trap loans and cannot be removed." : "", "This person is not flagged as a fosterer and cannot foster animals." : "", "This person is not flagged as a retailer and cannot handle retailer movements." : "", "This person is very similar to another person on file, carry on creating this record?" : "", "This person lives in the same area as the person who brought the animal to the shelter." : "", "This record has been changed by another user, please reload." : "", "This report cannot be sent by email as it requires criteria to run." : "", "This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "", "This screen allows you to add extra images to your database, for use in reports and documents." : "", "This type of movement requires a date." : "", "This type of movement requires a person." : "", "This week" : "", "This will permanently remove the selected records, are you sure?" : "", "This will permanently remove the selected roles, are you sure?" : "", "This will permanently remove the selected user accounts. Are you sure?" : "", "This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "", "This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "", "This will permanently remove this animal, are you sure?" : "", "This will permanently remove this incident, are you sure?" : "", "This will permanently remove this person, are you sure?" : "", "This will permanently remove this record, are you sure?" : "", "This will permanently remove this waiting list entry, are you sure?" : "", "This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "", "This year" : "", "Thoroughbred" : "", "Thu" : "", "Thumbnail size" : "", "Thursday" : "", "Tibetan Mastiff" : "", "Tibetan Spaniel" : "", "Tibetan Terrier" : "", "Tiger" : "&#3648;&#3626;&#3639;&#3629;", "Time" : "", "Time Brought In" : "", "Time On List" : "", "Time On Shelter" : "", "Time on list" : "", "Time on shelter" : "", "Timeline" : "", "Timeline ({0})" : "", "Times should be in HH:MM format, eg: 09:00, 16:30" : "", "Title" : "", "Title First Last" : "", "Title Initials Last" : "", "To" : "", "To Adoption" : "", "To Fostering" : "", "To Other" : "", "To Retailer" : "", "To add people to the rota, create new person records with the staff or volunteer flag." : "", "To continue using ASM, please renew {0}" : "", "To week beginning" : "", "Today" : "", "Tonkinese" : "", "Too Many Animals" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "Tooltip" : "", "Top Margin" : "", "Tortie" : "", "Tortie and White" : "", "Tortoise" : "", "Tosa Inu" : "", "Total" : "", "Total number of units in the container" : "", "Total payments" : "", "Toucan" : "", "Toy Fox Terrier" : "", "Training" : "", "Transactions" : "", "Transactions need a date and description." : "", "Transfer" : "", "Transfer In" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3648;&#3586;&#3657;&#3634;", "Transfer To" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3648;&#3586;&#3657;&#3634;", "Transfer an animal" : "", "Transfer from Municipal Shelter" : "", "Transfer from Other Shelter" : "", "Transfer successfully created." : "", "Transfer?" 
: "", "Transferred" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3648;&#3586;&#3657;&#3634;", "Transferred From" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3648;&#3586;&#3657;&#3634;", "Transferred In" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3648;&#3586;&#3657;&#3634;", "Transferred In {0}" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3648;&#3586;&#3657;&#3634;", "Transferred Out" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3629;&#3629;&#3585;", "Transferred Out {0}" : "&#3650;&#3629;&#3609;&#3618;&#3657;&#3634;&#3618;&#3629;&#3629;&#3585;", "Transfers must have a valid transfer date." : "", "Transport" : "", "Transport Book" : "", "Transport Types" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3585;&#3619;&#3634;&#3615;", "Transport book" : "", "Transport requires an animal" : "", "Transports must have valid pickup and dropoff dates and times." : "", "Trap Loans" : "", "Trap Number" : "", "Trap Types" : "", "Trap loan" : "", "Trap loans" : "", "Treat animals at retailers as part of the shelter inventory" : "", "Treat foster animals as part of the shelter inventory" : "", "Treat trial adoptions as part of the shelter inventory" : "", "Treatment" : "", "Treatment Given" : "", "Treatment marked as given for {0} - {1}" : "", "Treatment name cannot be blank" : "", "Treatments" : "", "Treeing Walker Coonhound" : "", "Trial Adoption" : "", "Trial adoption" : "", "Trial adoption book" : "", "Trial ends on" : "", "Tricolour" : "", "Trigger Batch Processes" : "", "Tu" : "", "Tue" : "", "Tuesday" : "", "Tumblr" : "", "Turkey" : "", "Turkish Angora" : "", "Turkish Van" : "", "Turtle" : "&#3648;&#3605;&#3656;&#3634;", "Twitter" : "", "Type" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;", "Type of animal links to show" : "", "U (Unwanted Cat)" : "", "UK Giftaid" : "", "URL" : "", "UUUUUUUUUU or UUUU = unique number" : "", "Unable to Afford" : "", "Unable to Cope" : "", "Unaltered" : "", "Unaltered Adopted Animals" : "", "Unaltered Dog - 1 year" : "", "Unaltered Dog - 3 year" : "", "Unavailable" : "", "Under {0} weeks old" : "", "Unit" : "", "Unit Price" : "", "Unit within the location, eg: pen or cage number" : "", "Units" : "", "Unknown" : "&#3652;&#3617;&#3656;&#3607;&#3619;&#3634;&#3610;", "Unknown microchip brand" : "", "Unpaid Fines" : "", "Unreserved" : "", "Unsaved Changes" : "", "Unspecified" : "", "Unsuitable Accomodation" : "", "Up for adoption" : "", "Upcoming medical items" : "", "Update" : "", "Update publishing options" : "", "Update system options" : "", "Update the daily boarding cost for this animal" : "", "Updated database to version {0}" : "", "Updated." : "", "Updating..." : "", "Upload" : "", "Upload Document" : "", "Upload ODT" : "", "Upload Photo" : "", "Upload a new OpenOffice template" : "", "Upload all available images for animals" : "", "Upload an SQL script" : "", "Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "", "Uploading..." : "", "Urgencies" : "", "Urgency" : "", "Urgent" : "", "Usage Date" : "", "Usage Type" : "", "Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." 
: "", "Use Automatic Insurance Numbers" : "", "Use HTML5 client side image scaling where available to speed up image uploads" : "", "Use SQL Interface" : "", "Use a single breed field" : "", "Use animal comments" : "", "Use fancy tooltips" : "", "Use notes from preferred photo" : "", "Use the icon in the lower right of notes fields to view them in a separate window." : "", "User Accounts" : "", "User Roles" : "", "User accounts that will only ever call the Service API should set this to No." : "", "User roles" : "", "Username" : "", "Username '{0}' already exists" : "", "Users" : "", "Users need a username, password and at least one role or the superuser flag setting." : "", "Vacation" : "&#3607;&#3637;&#3656;&#3605;&#3633;&#3657;&#3591;", "Vaccinate" : "", "Vaccinate Animal" : "", "Vaccination" : "", "Vaccination Book" : "", "Vaccination Given" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3623;&#3633;&#3588;&#3595;&#3637;&#3609;", "Vaccination Types" : "&#3611;&#3619;&#3632;&#3648;&#3616;&#3607;&#3623;&#3633;&#3588;&#3595;&#3637;&#3609;", "Vaccination book" : "", "Vaccination marked as given for {0} - {1}" : "", "Vaccinations" : "", "Vaccinations need an animal and at least a required date." : "", "Vaccinations require an animal" : "", "Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "", "Valid tokens for the subject and text" : "", "Value" : "", "Various" : "", "Vertical Pitch" : "", "Very Large" : "&#3651;&#3627;&#3597;&#3656;&#3617;&#3634;&#3585;", "Vet" : "", "Vet Visit" : "", "Victim" : "", "Victim Name" : "", "Video Link" : "", "Vietnamese Pot Bellied" : "", "View" : "", "View Accounts" : "", "View Animals" : "&#3604;&#3641;&#3626;&#3633;&#3605;&#3623;&#3660;&#3605;&#3656;&#3634;&#3591;&#3654;", "View Audit Trail" : "", "View Citations" : "", "View Clinic Appointment" : "", "View Cost" : "", "View Diary" : "", "View Diets" : "", "View Document" : "", "View Document Repository" : "", "View Found Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "View Incidents" : "", "View Incoming Forms" : "", "View Investigations" : "", "View Licenses" : "", "View Litter" : "", "View Log" : "&#3611;&#3641;&#3617;&#3610;&#3633;&#3609;&#3607;&#3638;&#3585;&#3651;&#3627;&#3617;&#3656;", "View Lost Animal" : "&#3626;&#3633;&#3605;&#3623;&#3660;&#3607;&#3637;&#3656;&#3614;&#3610;&#3648;&#3592;&#3629;", "View Manual" : "", "View Media" : "", "View Medical Records" : "", "View Movement" : "", "View PDF" : "", "View Payments" : "", "View Person" : "", "View Person Links" : "", "View Report" : "", "View Roles" : "", "View Rota" : "", "View Shelter Animals" : "", "View Staff Person Records" : "", "View Stock" : "", "View Tests" : "", "View Training Videos" : "", "View Transport" : "", "View Trap Loans" : "", "View Vaccinations" : "", "View Volunteer Person Records" : "", "View Vouchers" : "", "View Waiting List" : "&#3604;&#3641;&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3619;&#3629;", "View animals matching publishing options" : "", "View littermates" : "", "View matching records" : "", "View media" : "", "View publishing logs" : "", "Visual Theme" : "", "Vizsla" : "", "Volunteer" : "", "Voucher Types" : "", "Vouchers" : "", "Vouchers need an issue and expiry date." 
: "", "WARNING: This animal has not been microchipped" : "", "WARNING: This animal is over 6 months old and has not been neutered/spayed" : "", "Waiting" : "&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3588;&#3629;&#3618;", "Waiting List" : "&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3588;&#3629;&#3618;", "Waiting List - Additional" : "", "Waiting List - Details" : "", "Waiting List - Removal" : "", "Waiting List Contact" : "", "Waiting List Donation" : "", "Waiting List {0}" : "&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3619;&#3629;: {0}", "Waiting List: {0}" : "&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3619;&#3629;: {0}", "Waiting Room" : "&#3619;&#3634;&#3618;&#3594;&#3639;&#3656;&#3629;&#3607;&#3637;&#3656;&#3585;&#3635;&#3621;&#3633;&#3591;&#3588;&#3629;&#3618;", "Waiting for documents..." : "", "Waiting list donations" : "", "Waiting list entries matching '{0}'." : "", "Waiting list entries must have a contact" : "", "Waiting list entry for {0} ({1})" : "", "Waiting list entry successfully added." : "", "Waiting list urgency update period in days" : "", "Warmblood" : "", "Warn if the name of the new animal is similar to one entered recently" : "", "Warn when adopting an animal who has not been microchipped" : "", "Warn when adopting an unaltered animal" : "", "Warn when adopting to a person who has been banned from adopting animals" : "", "Warn when adopting to a person who has not been homechecked" : "", "Warn when adopting to a person who has previously brought an animal to the shelter" : "", "Warn when adopting to a person who lives in the same area as the original owner" : "", "Warn when creating multiple reservations on the same animal" : "", "Warnings" : "&#3588;&#3635;&#3648;&#3605;&#3639;&#3629;&#3609;", "Wasted" : "", "Water Bills" : "", "We" : "", "Wed" : "", "Wednesday" : "", "Week" : "", "Week beginning {0}" : "", "Weekly" : "&#3619;&#3634;&#3618;&#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "Weight" : "&#3609;&#3657;&#3635;&#3627;&#3609;&#3633;&#3585;", "Weimaraner" : "", "Welcome!" : "", "Welsh Corgi" : "", "Welsh Springer Spaniel" : "", "Welsh Terrier" : "", "West Highland White Terrier Westie" : "", "Wheaten Terrier" : "", "When" : "", "When ASM should stop showing this message" : "", "When I change the location of an animal, make a note of it in the log with this type" : "", "When I change the weight of an animal, make a note of it in the log with this type" : "", "When I generate a document, make a note of it in the log with this type" : "", "When I mark an animal held, make a note of it in the log with this type" : "", "When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "", "When a message is created, email it to each matching user" : "", "When creating payments from the Move menu screens, mark them due instead of received" : "", "When displaying calendars, the first day of the week is" : "", "When displaying person names, use the format" : "", "When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." 
: "", "When entering vaccinations, default the last batch number and manufacturer for that type" : "", "When matching lost animals, include shelter animals" : "", "When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "", "When receiving multiple payments, allow the due and received dates to be set" : "", "When receiving payments, allow a quantity and unit price to be set" : "", "When receiving payments, allow recording of sales tax with a default rate of" : "", "When receiving payments, allow the deposit account to be overridden" : "", "When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "", "When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "", "Where this animal is located within the shelter" : "", "Whippet" : "", "White" : "&#3586;&#3634;&#3623;", "White German Shepherd" : "", "White and Black" : "&#3586;&#3634;&#3623;&#3649;&#3621;&#3632;&#3604;&#3635;", "White and Brindle" : "", "White and Brown" : "&#3586;&#3634;&#3623;&#3649;&#3621;&#3632;&#3609;&#3657;&#3635;&#3605;&#3634;&#3621;", "White and Grey" : "&#3586;&#3634;&#3623;&#3649;&#3621;&#3632;&#3648;&#3607;&#3634;", "White and Liver" : "", "White and Tabby" : "", "White and Tan" : "", "White and Torti" : "", "Will this owner give a donation?" : "", "Wire-haired Pointing Griffon" : "", "Wirehaired Terrier" : "", "With Vet" : "", "With overnight batch" : "", "Withdrawal" : "", "Wk" : "", "Work" : "", "Work Phone" : "", "Work Types" : "", "XXX or XX = number unique for this year" : "", "Xoloitzcuintle/Mexican Hairless" : "", "YY or YYYY = current year" : "", "Yellow Labrador Retriever" : "", "Yellow and Grey" : "&#3648;&#3627;&#3621;&#3639;&#3629;&#3591;&#3649;&#3621;&#3632;&#3648;&#3607;&#3634;", "Yes" : "&#3651;&#3594;&#3656;", "Yes/No" : "&#3651;&#3594;&#3656;/&#3652;&#3617;&#3656;", "Yes/No/Unknown" : "", "Yorkshire Terrier Yorkie" : "", "You can bookmark search results, animals, people and most data entry screens." : "", "You can drag and drop animals in shelter view to change their locations." : "", "You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "", "You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "", "You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "", "You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "", "You can sort tables by clicking on the column headings." : "", "You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "", "You can use incoming forms to create new records or attach them to existing records." : "", "You can't have a return without a movement." : "", "You didn't specify any search criteria, so an on-shelter search was assumed." : "", "You have unsaved changes, are you sure you want to leave this page?" : "", "You must supply a code." : "", "Young Adult" : "", "Your CSV file should have a header row with field names ASM recognises." 
: "", "Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "", "Zipcode" : "", "Zipcode contains" : "", "[None]" : "[&#3652;&#3617;&#3656;&#3617;&#3637;]", "after connecting, chdir to" : "", "and" : "", "are sent to" : "", "at" : "", "cm" : "", "days" : "&#3623;&#3633;&#3609;", "estimate" : "", "filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "", "inches" : "", "invalid" : "", "kg" : "", "lb" : "", "less" : "", "mins" : "", "months" : "&#3648;&#3604;&#3639;&#3629;&#3609;", "more" : "", "on" : "", "or" : "", "or estimated age in years" : "", "oz" : "", "to" : "", "today" : "", "treatments" : "", "treatments, every" : "", "weekdays" : "", "weeks" : "&#3626;&#3633;&#3611;&#3604;&#3634;&#3627;&#3660;", "weeks after last contact." : "", "years" : "&#3611;&#3637;", "yesterday" : "", "{0} (under {1} months)" : "", "{0} - {1} ({2} {3} aged {4})" : "", "{0} - {1} {2}" : "", "{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "", "{0} animals successfully updated." : "", "{0} cannot be blank" : "", "{0} fine, paid" : "", "{0} fine, unpaid" : "", "{0} incurred in costs" : "", "{0} is running ({1}&#37; complete)." : "", "{0} payment records created." : "", "{0} received" : "", "{0} record(s) match the mail merge." : "", "{0} results." : "", "{0} rows affected." : "", "{0} selected" : "", "{0} treatments every {1} days" : "", "{0} treatments every {1} months" : "", "{0} treatments every {1} weekdays" : "", "{0} treatments every {1} weeks" : "", "{0} treatments every {1} years" : "", "{0} {1} ({2} treatments)" : "", "{0} {1} aged {2}" : "", "{0} {1} {2} aged {3}" : "", "{0} {1}: Moved from {2} to {3}" : "", "{0} {1}: adopted by {2}" : "", "{0} {1}: altered" : "", "{0} {1}: available for adoption" : "", "{0} {1}: died ({2})" : "", "{0} {1}: entered the shelter" : "", "{0} {1}: escaped" : "", "{0} {1}: euthanised ({2})" : "", "{0} {1}: fostered to {2}" : "", "{0} {1}: held" : "", "{0} {1}: microchipped" : "", "{0} {1}: not available for adoption" : "", "{0} {1}: quarantined" : "", "{0} {1}: received {2}" : "", "{0} {1}: reclaimed by {2}" : "", "{0} {1}: released" : "", "{0} {1}: reserved by {2}" : "", "{0} {1}: returned by {2}" : "", "{0} {1}: sent to retailer {2}" : "", "{0} {1}: stolen" : "", "{0} {1}: tested positive for FIV" : "", "{0} {1}: tested positive for FeLV" : "", "{0} {1}: tested positive for Heartworm" : "", "{0} {1}: transferred to {2}" : "", "{0}, Week {1}" : "", "{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "", "{0}: closed {1} ({2})" : "", "{0}: opened {1}" : "", "{0}: waiting list - {1}" : "", "{0}: {1} {2} - {3} {4}" : "", "{2}: found in {1}: {0}" : "", "{2}: lost in {1}: {0}" : "", "{plural0} animal as dead on arrival" : "", "{plural0} animal control call due for followup today" : "", "{plural0} animal died" : "", "{plural0} animal entered the shelter" : "", "{plural0} animal has a hold ending today" : "", "{plural0} animal has been on the shelter longer than {0} months" : "", "{plural0} animal is not available for adoption" : "", "{plural0} animal was adopted" : "", "{plural0} animal was euthanized" : "", "{plural0} animal was reclaimed by its owner" : "", "{plural0} animal was transferred to another shelter" : "", "{plural0} day." 
: "", "{plural0} incomplete animal control call" : "", "{plural0} item of stock expires in the next month" : "", "{plural0} item of stock has expired" : "", "{plural0} medical treatment needs to be administered today" : "", "{plural0} month." : "", "{plural0} new online form submission" : "", "{plural0} person has an overdue payment" : "", "{plural0} person with an active reservation has not been homechecked" : "", "{plural0} potential match for a lost animal" : "", "{plural0} recent publisher run had errors" : "", "{plural0} reservation has been active over a week without adoption" : "", "{plural0} result found in {1} seconds. Order: {2}" : "", "{plural0} shelter animal has not been microchipped" : "", "{plural0} shelter animal has people looking for them" : "", "{plural0} test needs to be performed today" : "", "{plural0} transport does not have a driver assigned" : "", "{plural0} trap is overdue for return" : "", "{plural0} trial adoption has ended" : "", "{plural0} unaltered animal has been adopted in the last month" : "", "{plural0} undispatched animal control call" : "", "{plural0} unpaid fine" : "", "{plural0} urgent entry on the waiting list" : "", "{plural0} vaccination has expired" : "", "{plural0} vaccination needs to be administered today" : "", "{plural0} week." : "", "{plural0} year." : "", "{plural1} animal control calls due for followup today" : "", "{plural1} animals are not available for adoption" : "", "{plural1} animals died" : "", "{plural1} animals entered the shelter" : "", "{plural1} animals have been on the shelter longer than {0} months" : "", "{plural1} animals have holds ending today" : "", "{plural1} animals were adopted" : "", "{plural1} animals were dead on arrival" : "", "{plural1} animals were euthanized" : "", "{plural1} animals were reclaimed by their owners" : "", "{plural1} animals were transferred to other shelters" : "", "{plural1} days." : "", "{plural1} incomplete animal control calls" : "", "{plural1} items of stock expire in the next month" : "", "{plural1} items of stock have expired" : "", "{plural1} medical treatments need to be administered today" : "", "{plural1} months." : "", "{plural1} new online form submissions" : "", "{plural1} people have overdue payments" : "", "{plural1} people with active reservations have not been homechecked" : "", "{plural1} potential matches for lost animals" : "", "{plural1} recent publisher runs had errors" : "", "{plural1} reservations have been active over a week without adoption" : "", "{plural1} results found in {1} seconds. Order: {2}" : "", "{plural1} shelter animals have not been microchipped" : "", "{plural1} shelter animals have people looking for them" : "", "{plural1} tests need to be performed today" : "", "{plural1} transports do not have a driver assigned" : "", "{plural1} traps are overdue for return" : "", "{plural1} trial adoptions have ended" : "", "{plural1} unaltered animals have been adopted in the last month" : "", "{plural1} undispatched animal control calls" : "", "{plural1} unpaid fines" : "", "{plural1} urgent entries on the waiting list" : "", "{plural1} vaccinations have expired" : "", "{plural1} vaccinations need to be administered today" : "", "{plural1} weeks." : "", "{plural1} years." 
: "", "{plural2} animal control calls due for followup today" : "", "{plural2} animals are not available for adoption" : "", "{plural2} animals died" : "", "{plural2} animals entered the shelter" : "", "{plural2} animals have been on the shelter longer than {0} months" : "", "{plural2} animals have holds ending today" : "", "{plural2} animals were adopted" : "", "{plural2} animals were dead on arrival" : "", "{plural2} animals were euthanized" : "", "{plural2} animals were reclaimed by their owners" : "", "{plural2} animals were transferred to other shelters" : "", "{plural2} days." : "", "{plural2} incomplete animal control calls" : "", "{plural2} items of stock expire in the next month" : "", "{plural2} items of stock have expired" : "", "{plural2} medical treatments need to be administered today" : "", "{plural2} months." : "", "{plural2} new online form submissions" : "", "{plural2} people have overdue payments" : "", "{plural2} people with active reservations have not been homechecked" : "", "{plural2} potential matches for lost animals" : "", "{plural2} recent publisher runs had errors" : "", "{plural2} reservations have been active over a week without adoption" : "", "{plural2} results found in {1} seconds. Order: {2}" : "", "{plural2} shelter animals have not been microchipped" : "", "{plural2} shelter animals have people looking for them" : "", "{plural2} tests need to be performed today" : "", "{plural2} transports do not have a driver assigned" : "", "{plural2} traps are overdue for return" : "", "{plural2} trial adoptions have ended" : "", "{plural2} unaltered animals have been adopted in the last month" : "", "{plural2} undispatched animal control calls" : "", "{plural2} unpaid fines" : "", "{plural2} urgent entries on the waiting list" : "", "{plural2} vaccinations have expired" : "", "{plural2} vaccinations need to be administered today" : "", "{plural2} weeks." : "", "{plural2} years." : "", "{plural3} animal control calls due for followup today" : "", "{plural3} animals are not available for adoption" : "", "{plural3} animals died" : "", "{plural3} animals entered the shelter" : "", "{plural3} animals have been on the shelter longer than {0} months" : "", "{plural3} animals have holds ending today" : "", "{plural3} animals were adopted" : "", "{plural3} animals were dead on arrival" : "", "{plural3} animals were euthanized" : "", "{plural3} animals were reclaimed by their owners" : "", "{plural3} animals were transferred to other shelters" : "", "{plural3} days." : "", "{plural3} incomplete animal control calls" : "", "{plural3} items of stock expire in the next month" : "", "{plural3} items of stock have expired" : "", "{plural3} medical treatments need to be administered today" : "", "{plural3} months." : "", "{plural3} new online form submissions" : "", "{plural3} people have overdue payments" : "", "{plural3} people with active reservations have not been homechecked" : "", "{plural3} potential matches for lost animals" : "", "{plural3} recent publisher runs had errors" : "", "{plural3} reservations have been active over a week without adoption" : "", "{plural3} results found in {1} seconds. 
Order: {2}" : "", "{plural3} shelter animals have not been microchipped" : "", "{plural3} shelter animals have people looking for them" : "", "{plural3} tests need to be performed today" : "", "{plural3} transports do not have a driver assigned" : "", "{plural3} traps are overdue for return" : "", "{plural3} trial adoptions have ended" : "", "{plural3} unaltered animals have been adopted in the last month" : "", "{plural3} undispatched animal control calls" : "", "{plural3} unpaid fines" : "", "{plural3} urgent entries on the waiting list" : "", "{plural3} vaccinations have expired" : "", "{plural3} vaccinations need to be administered today" : "", "{plural3} weeks." : "", "{plural3} years." : "" }
gpl-3.0
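The record above is the tail of a sheltermanager locale dictionary: English source strings (with {0}-style placeholders and {plural0}..{plural3} variants) map to translations, most of them still empty for this locale. Below is a minimal sketch of how such a map can be consulted with fallback to the untranslated English key; the TRANSLATIONS dict and translate() helper are hypothetical illustrations, not sheltermanager's own i18n API.

# Hypothetical lookup over a locale map shaped like the record above:
# empty translations fall back to the English key, then {0}/{1} placeholders
# are substituted with str.format().
TRANSLATIONS = {
    "days": "\u0e27\u0e31\u0e19",                       # Thai "days", as in the record
    "{0} animals successfully updated.": "",            # still untranslated
}

def translate(key, *args):
    """Return the localised string for key, falling back to the key itself."""
    text = TRANSLATIONS.get(key, "") or key
    return text.format(*args) if args else text

print(translate("{0} animals successfully updated.", 5))  # "5 animals successfully updated."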
ammaradil/fibonacci
Lib/site-packages/pip/_vendor/requests/__init__.py
412
1861
# -*- coding: utf-8 -*- # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ requests HTTP library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> 'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('http://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key2": "value2", "key1": "value1" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at <http://python-requests.org>. :copyright: (c) 2015 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ __title__ = 'requests' __version__ = '2.7.0' __build__ = 0x020700 __author__ = 'Kenneth Reitz' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2015 Kenneth Reitz' # Attempt to enable urllib3's SNI support, if possible try: from .packages.urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() except ImportError: pass from . import utils from .models import Request, Response, PreparedRequest from .api import request, get, head, post, patch, put, delete, options from .sessions import session, Session from .status_codes import codes from .exceptions import ( RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError, ConnectionError ) # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler())
mit
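The requests __init__ above ends by attaching a NullHandler to the package logger so the library stays silent unless the host application configures logging itself. A minimal sketch of the same pattern applied to a hypothetical package named "mylib"; the try/except mirrors the pre-2.7 fallback in the file.

import logging

try:
    from logging import NullHandler          # available on Python 2.7+ / 3.x
except ImportError:                          # older interpreters: define a no-op handler
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# "mylib" is a placeholder; a real library would usually pass __name__ here.
logging.getLogger("mylib").addHandler(NullHandler())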
wolverineav/neutron
neutron/db/migration/autogen.py
11
3994
# Copyright (c) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic.operations import ops from alembic.util import Dispatcher from alembic.util import rev_id as new_rev_id from neutron.db.migration import cli _ec_dispatcher = Dispatcher() def process_revision_directives(context, revision, directives): if cli._use_separate_migration_branches(context.config): directives[:] = [ directive for directive in _assign_directives(context, directives) ] def _assign_directives(context, directives, phase=None): for directive in directives: decider = _ec_dispatcher.dispatch(directive) if phase is None: phases = cli.MIGRATION_BRANCHES else: phases = (phase,) for phase in phases: decided = decider(context, directive, phase) if decided: yield decided @_ec_dispatcher.dispatch_for(ops.MigrationScript) def _migration_script_ops(context, directive, phase): """Generate a new ops.MigrationScript() for a given phase. E.g. given an ops.MigrationScript() directive from a vanilla autogenerate and an expand/contract phase name, produce a new ops.MigrationScript() which contains only those sub-directives appropriate to "expand" or "contract". Also ensure that the branch directory exists and that the correct branch labels/depends_on/head revision are set up. """ version_path = cli._get_version_branch_path( context.config, release=cli.CURRENT_RELEASE, branch=phase) autogen_kwargs = {} cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs) op = ops.MigrationScript( new_rev_id(), ops.UpgradeOps(ops=[ d for d in _assign_directives( context, directive.upgrade_ops.ops, phase) ]), ops.DowngradeOps(ops=[]), message=directive.message, **autogen_kwargs ) if not op.upgrade_ops.is_empty(): return op @_ec_dispatcher.dispatch_for(ops.AddConstraintOp) @_ec_dispatcher.dispatch_for(ops.CreateIndexOp) @_ec_dispatcher.dispatch_for(ops.CreateTableOp) @_ec_dispatcher.dispatch_for(ops.AddColumnOp) def _expands(context, directive, phase): if phase == 'expand': return directive else: return None @_ec_dispatcher.dispatch_for(ops.DropConstraintOp) @_ec_dispatcher.dispatch_for(ops.DropIndexOp) @_ec_dispatcher.dispatch_for(ops.DropTableOp) @_ec_dispatcher.dispatch_for(ops.DropColumnOp) def _contracts(context, directive, phase): if phase == 'contract': return directive else: return None @_ec_dispatcher.dispatch_for(ops.AlterColumnOp) def _alter_column(context, directive, phase): is_expand = phase == 'expand' if is_expand and ( directive.modify_nullable is True ): return directive elif not is_expand and ( directive.modify_nullable is False ): return directive else: raise NotImplementedError( "Don't know if operation is an expand or " "contract at the moment: %s" % directive) @_ec_dispatcher.dispatch_for(ops.ModifyTableOps) def _modify_table_ops(context, directive, phase): op = ops.ModifyTableOps( directive.table_name, ops=[ d for d in _assign_directives(context, directive.ops, phase) ], schema=directive.schema) if not op.is_empty(): return op
apache-2.0
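process_revision_directives above is written as an Alembic autogenerate hook. A hedged sketch of how such a hook is typically registered from an env.py via context.configure(); the connection and target_metadata arguments are placeholders here, not Neutron's actual migration environment.

from alembic import context

from neutron.db.migration import autogen


def run_migrations_online(connection, target_metadata):
    # Alembic calls the hook once per autogenerated revision, letting it
    # rewrite the directive list (here: split ops into expand/contract scripts).
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        process_revision_directives=autogen.process_revision_directives,
    )
    with context.begin_transaction():
        context.run_migrations()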
Swimlane/sw-python-client
functional_tests/driver_tests/test_user_group_fields.py
1
57102
import pytest from swimlane import exceptions @pytest.fixture(autouse=True, scope='module') def my_fixture(helpers): # setup stuff defaultApp = 'user group fields' pytest.swimlane_instance = helpers.swimlane_instance pytest.app, pytest.appid = helpers.findCreateApp(defaultApp) pytest.testUsers = list(pytest.usersCreated.keys()) pytest.testGroups = list(pytest.groupsCreated.keys()) yield # teardown stuff helpers.cleanupData() class TestRequiredUserGroupField: def test_required_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) assert theRecord["Required User/Groups"] == swimUser def test_required_field_not_set(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create(**{"User/Groups": swimUser}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Required field "Required User/Groups" is not set' % pytest.app.acronym def test_required_field_not_set_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Required User/Groups"] = None with pytest.raises(exceptions.ValidationError) as excinfo: theRecord.save() assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Required field "Required User/Groups" is not set' % theRecord.tracking_id class TestUserGroupField: def test_user_group_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "User/Groups": swimUser2}) assert theRecord["User/Groups"].id == swimUser2.id def test_user_group_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["User/Groups"] = swimUser2 def test_user_group_field_bad_type_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "User/Groups": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `User/Groups`' % ( pytest.app.acronym, swimGroup.name) def test_user_group_field_on_save_bad_type_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["User/Groups"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Group `%s` is not a valid selection for field `User/Groups`' % ( theRecord.tracking_id, swimGroup.name) class TestGroupsOnlyField: def test_groups_only_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Groups Only": swimGroup}) assert theRecord["Groups Only"] == swimGroup def test_groups_only_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Groups Only"] = swimGroup def test_groups_only_field_bad_type_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Groups Only": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Groups Only`' % ( pytest.app.acronym, swimUser2.username) def test_groups_only_field_on_save_bad_type_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Groups Only"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Groups Only`' % ( theRecord.tracking_id, swimUser2.username) class TestReadOnlyUserGroupsField: def test_read_only_user_groups_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Read-only User/Groups": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Cannot set readonly field \'Read-only User/Groups\'' % pytest.app.acronym def test_read_only_user_groups_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Read-only User/Groups"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Cannot set readonly field \'Read-only User/Groups\'' % theRecord.tracking_id class TestCreatedByField: def test_created_by_field_value(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) assert theRecord["Created by"] == swimUser @pytest.mark.xfail(reason="SPT-6352: This should fail, that the Created by is read only.") def test_created_by_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Created by": swimUser2}) assert theRecord["Created by"] == swimUser @pytest.mark.xfail(reason="SPT-6352: This should fail, that the Created by is read only.") def test_created_by_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Created by"] = swimUser2 theRecord.save() assert theRecord["Created by"] == swimUser class TestLastUpdatedByField: def test_last_updated_by_field_value(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) assert theRecord["Last updated by"] == swimUser @pytest.mark.xfail(reason="SPT-6352: This should fail, that the last updated by is read only.") def test_last_updated_by_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Last updated by": swimUser2}) assert theRecord["Last updated by"] == swimUser @pytest.mark.xfail(reason="SPT-6352: This should fail, that the Last updated by is read only.") def test_last_updated_by_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Last updated by"] = swimUser2 theRecord.save() assert theRecord["Last updated by"] == swimUser class TestAllUsersAndGroupsField: def test_all_users_and_groups_field_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "All Users and Groups": swimUser2}) assert theRecord["All Users and Groups"].id == swimUser2.id def test_all_users_and_groups_field_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "All Users and Groups": swimGroup}) assert theRecord["All Users and Groups"] == swimGroup def test_all_users_and_groups_field_user_on_save(helpers): swimUser = 
pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["All Users and Groups"] = swimUser2 theRecord.save() def test_all_users_and_groups_field_group_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["All Users and Groups"] = swimGroup theRecord.save() def test_all_users_and_groups_field_on_save_bad_value_type(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["All Users and Groups"] = {"name": swimGroup} assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Field \'All Users and Groups\' expects one of \'UserGroup\', got \'dict\' instead' % theRecord.tracking_id class TestSelectedGroupsField: def test_selected_groups_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Selected Groups": swimGroup}) assert theRecord["Selected Groups"] == swimGroup def test_selected_groups_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Selected Groups"] = swimGroup theRecord.save() def test_selected_groups_field_wrong_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Selected Groups": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Selected Groups`' % ( pytest.app.acronym, swimGroup.name) def test_selected_groups_field_wrong_group_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupThree") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Selected Groups"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Group `%s` is not a valid selection for field `Selected Groups`' % ( theRecord.tracking_id, swimGroup.name) def test_selected_groups_field_bad_type_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Selected Groups": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Selected Groups`' % ( pytest.app.acronym, swimUser2.username) def test_selected_groups_field_on_save_bad_type_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Selected Groups"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Selected Groups`' % ( theRecord.tracking_id, swimUser2.username) class TestSelectedUsersField: def test_selected_users_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Selected Users": swimUser2}) assert theRecord["Selected Users"].id == swimUser2.id def test_selected_users_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userFour") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Selected Users"] = swimUser2 theRecord.save() def test_selected_users_field_wrong_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Selected Users": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Selected Users`' % ( pytest.app.acronym, swimUser2.username) def test_selected_users_field_wrong_user_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userThree") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Selected Users"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: User `%s` is not a valid selection for field `Selected Users`' % ( theRecord.tracking_id, swimUser2.username) def test_selected_users_field_bad_type_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Selected Users": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Selected Users`' % ( pytest.app.acronym, swimGroup.name) def test_selected_users_field_on_save_bad_type_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Selected Users"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Selected Users`' % ( theRecord.tracking_id, swimGroup.name) class TestSubgroupsOfGroupField: def test_sub_groups_of_group_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Sub-groups of Group": swimGroup}) assert theRecord["Sub-groups of Group"] == swimGroup def test_sub_groups_of_group_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Sub-groups of Group"] = swimGroup theRecord.save() def test_sub_groups_of_group_field_parent_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupCombo") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Sub-groups of Group": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % ( pytest.app.acronym, swimGroup.name) def test_sub_groups_of_group_field_parent_group_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupCombo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Sub-groups of Group"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % ( theRecord.tracking_id, swimGroup.name) def test_sub_groups_of_group_field_other_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Sub-groups of Group": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % ( pytest.app.acronym, swimGroup.name) def test_sub_groups_of_group_field_other_group_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Sub-groups of Group"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % ( theRecord.tracking_id, swimGroup.name) def test_sub_groups_of_group_field_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Sub-groups of Group": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Sub-groups of Group`' % ( pytest.app.acronym, swimUser2.username) def test_sub_groups_of_group_field_user_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Sub-groups of Group"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: User `%s` is not a valid selection for field `Sub-groups of Group`' % ( theRecord.tracking_id, swimUser2.username) class TestUsersMembersOfGroupField: @pytest.mark.xfail(reason="SPT-6355: Says the user who belongs to the group is not a valid selection.") def test_users_members_of_group_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Users Members of Group": swimUser2}) assert theRecord["Users Members of Group"] == swimUser2 @pytest.mark.xfail(reason="SPT-6355: Says the user who belongs to the group is not a valid selection.") def test_users_members_of_group_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Users Members of Group"] = swimUser2 theRecord.save() def test_users_members_of_group_field_user_not_member(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userFour") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Users Members of Group": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Users Members of Group`' % ( pytest.app.acronym, swimUser2.username) def test_users_members_of_group_field_not_member_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userFour") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Users Members of Group"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Users Members of Group`' % ( theRecord.tracking_id, swimUser2.username) def test_users_members_of_group_field_user_parent_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Users Members of Group": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Users Members of Group`' % (pytest.app.acronym, swimGroup.name) def test_users_members_of_group_field_parent_group_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Users Members of Group"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Group `%s` is not a valid selection for field `Users Members of Group`' % ( theRecord.tracking_id, swimGroup.name) class TestMultiSelectUsersField: def test_multi_select_users_field(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser2]}) assert len(theRecord["Multi-select User/Groups"]) == 1 for member in theRecord["Multi-select User/Groups"]: assert member.id == swimUser2.id def test_multi_select_users_field_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Multi-select User/Groups"] = [swimUser2] theRecord.save() # Should we handle this or say it has to be a list/array? def test_multi_select_users_field_single_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) with pytest.raises(TypeError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": swimUser2}) assert str(excinfo.value) == '\'User\' object is not iterable' # Should we handle this or say it has to be a list/array? def test_multi_select_users_field_single_user_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(TypeError) as excinfo: theRecord["Multi-select User/Groups"] = swimUser2 assert str(excinfo.value) == '\'User\' object is not iterable' def test_multi_select_users_field_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimGroup]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (pytest.app.acronym, swimGroup.name) def test_multi_select_users_field_group_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select User/Groups"] = [swimGroup] assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % ( theRecord.tracking_id, swimGroup.name) def test_multi_select_users_field_mix_users_groups(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser2, swimGroup]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (pytest.app.acronym, swimGroup.name) def test_multi_select_users_field_mix_users_groups_on_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select User/Groups"] = [swimUser2, swimGroup] assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % ( theRecord.tracking_id, swimGroup.name) @pytest.mark.xfail(reason="SPT-6354: This works for the adminuser, but not the others..") def test_multi_select_users_field_deselect_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]}) theRecord["Multi-select User/Groups"].deselect(swimUser2) theRecord.save() updatedRecord = pytest.app.records.get(id=theRecord.id) assert len(updatedRecord["Multi-select User/Groups"]) == 1 assert updatedRecord["Multi-select User/Groups"][0].id == swimUser.id def test_multi_select_users_field_deselect_other_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") swimUser3 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]}) with pytest.raises(KeyError) as excinfo: theRecord["Multi-select User/Groups"].deselect(swimUser3) assert str(excinfo.value) == '<User: %s>' % swimUser3.username theRecord.save() updatedRecord = pytest.app.records.get(id=theRecord.id) assert len(updatedRecord["Multi-select User/Groups"]) == 2 userIds = [updatedRecord["Multi-select User/Groups"][1].id, updatedRecord["Multi-select User/Groups"][0].id] assert swimUser.id in userIds assert swimUser2.id in userIds def test_multi_select_users_field_select_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") swimUser3 = pytest.swimlane_instance.users.get( 
display_name="PYTHON-userTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]}) theRecord["Multi-select User/Groups"].select(swimUser3) theRecord.save() updatedRecord = pytest.app.records.get(id=theRecord.id) assert len(updatedRecord["Multi-select User/Groups"]) == 3 userIds = [updatedRecord["Multi-select User/Groups"][1].id, updatedRecord["Multi-select User/Groups"][0].id, updatedRecord["Multi-select User/Groups"][2].id] assert swimUser3.id in userIds assert swimUser2.id in userIds assert swimUser.id in userIds def test_multi_select_users_field_select_existing_user(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]}) theRecord["Multi-select User/Groups"].select(swimUser2) theRecord.save() updatedRecord = pytest.app.records.get(id=theRecord.id) assert len(updatedRecord["Multi-select User/Groups"]) == 2 userIds = [updatedRecord["Multi-select User/Groups"][1].id, updatedRecord["Multi-select User/Groups"][0].id] assert swimUser.id in userIds assert swimUser2.id in userIds def test_multi_select_users_field_select_group(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)]) swimGroup = pytest.swimlane_instance.groups.get( name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)]) theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select User/Groups"].select(swimGroup) assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % ( theRecord.tracking_id, swimGroup.name) theRecord.save() updatedRecord = pytest.app.records.get(id=theRecord.id) assert len(updatedRecord["Multi-select User/Groups"]) == 2 userIds = [updatedRecord["Multi-select User/Groups"][1].id, updatedRecord["Multi-select User/Groups"][0].id] assert swimUser.id in userIds assert swimUser2.id in userIds class TestMultiSelectSpecificUsersAndGroupsField: def test_multi_select_specific_users_groups_field_user_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2]}) assert len(theRecord["Multi-select Specific Users and Groups"]) == 1 for member in theRecord["Multi-select Specific Users and Groups"]: assert member.id == swimUser2.id def test_multi_select_specific_users_groups_field_group_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup]}) assert len(theRecord["Multi-select Specific Users and Groups"]) == 1 for member in theRecord["Multi-select Specific Users and Groups"]: assert member.id == swimGroup.id def test_multi_select_specific_users_groups_field_user_and_group_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2, swimGroup]}) assert len(theRecord["Multi-select Specific Users and Groups"]) == 2 for member in theRecord["Multi-select Specific Users and Groups"]: assert member.id in [swimUser2.id, swimGroup.id] def test_multi_select_specific_users_groups_field_user_and_group_invalid_user_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup, swimUser2]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( pytest.app.acronym, swimUser2.username) def test_multi_select_specific_users_groups_field_user_and_group_invalid_group_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupThree") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup, swimUser2]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. 
Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name) def test_multi_select_specific_users_groups_field_invalid_group_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name) def test_multi_select_specific_users_groups_field_invalid_group_subgroup_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupThree") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name) def test_multi_select_specific_users_groups_field_invalid_user_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( pytest.app.acronym, swimUser2.username) def test_multi_select_specific_users_groups_field_invalid_user_group_member_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userThree") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2]}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. 
Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( pytest.app.acronym, swimUser2.username) def test_multi_select_specific_users_groups_field_user_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Multi-select Specific Users and Groups"] = [swimUser2] theRecord.save() def test_multi_select_specific_users_groups_field_group_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Multi-select Specific Users and Groups"] = [swimGroup] theRecord.save() def test_multi_select_specific_users_groups_field_user_and_group_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Multi-select Specific Users and Groups"] = [swimUser2, swimGroup] theRecord.save() def test_multi_select_specific_users_groups_field_invalid_group_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select Specific Users and Groups"] = [swimGroup] assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( theRecord.tracking_id, swimGroup.name) def test_multi_select_specific_users_groups_field_invalid_group_subgroup_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupThree") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select Specific Users and Groups"] = [swimGroup] assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( theRecord.tracking_id, swimGroup.name) def test_multi_select_specific_users_groups_field_invalid_user_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select Specific Users and Groups"] = [swimUser2] assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( theRecord.tracking_id, swimUser2.username) def test_multi_select_specific_users_groups_field_invalid_user_group_member_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userThree") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Multi-select Specific Users and Groups"] = [swimUser2] assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % ( theRecord.tracking_id, swimUser2.username) class TestSelectSpecificUsersAndGroupsField: def test_select_specific_users_groups_field_user_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Specific Users and Groups": swimUser2}) assert theRecord["Specific Users and Groups"].id == swimUser2.id def test_select_specific_users_groups_field_group_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser, "Specific Users and Groups": swimGroup}) assert theRecord["Specific Users and Groups"] == swimGroup def test_select_specific_users_groups_field_invalid_group_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Specific Users and Groups": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name) def test_select_specific_users_groups_field_invalid_group_subgroup_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupThree") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Specific Users and Groups": swimGroup}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name) def test_select_specific_users_groups_field_invalid_user_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Specific Users and Groups": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. 
Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % ( pytest.app.acronym, swimUser2.username) def test_select_specific_users_groups_field_invalid_user_group_member_create(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userThree") with pytest.raises(exceptions.ValidationError) as excinfo: pytest.app.records.create( **{"Required User/Groups": swimUser, "Specific Users and Groups": swimUser2}) assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % ( pytest.app.acronym, swimUser2.username) def test_select_specific_users_groups_field_user_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userOne") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Specific Users and Groups"] = swimUser2 theRecord.save() def test_select_specific_users_groups_field_group_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) theRecord["Specific Users and Groups"] = swimGroup theRecord.save() def test_select_specific_users_groups_field_invalid_group_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupFour") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Specific Users and Groups"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % ( theRecord.tracking_id, swimGroup.name) def test_select_specific_users_groups_field_invalid_group_subgroup_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimGroup = pytest.swimlane_instance.groups.get( name="PYTHON-groupThree") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Specific Users and Groups"] = swimGroup assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % ( theRecord.tracking_id, swimGroup.name) def test_select_specific_users_groups_field_invalid_user_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userTwo") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Specific Users and Groups"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. 
Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % ( theRecord.tracking_id, swimUser2.username) def test_select_specific_users_groups_field_invalid_user_group_member_save(helpers): swimUser = pytest.swimlane_instance.users.get(display_name="admin") swimUser2 = pytest.swimlane_instance.users.get( display_name="PYTHON-userThree") theRecord = pytest.app.records.create( **{"Required User/Groups": swimUser}) with pytest.raises(exceptions.ValidationError) as excinfo: theRecord["Specific Users and Groups"] = swimUser2 assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % ( theRecord.tracking_id, swimUser2.username)
mit
kamijawa/ogc_server
bayesian/test/test_gaussian_bayesian_network.py
2
2497
from __future__ import division

import pytest
import os

from bayesian.gaussian import MeansVector, CovarianceMatrix
from bayesian.gaussian_bayesian_network import *
from bayesian.examples.gaussian_bayesian_networks.river import (
    f_a, f_b, f_c, f_d)


def pytest_funcarg__river_graph(request):
    g = build_graph(f_a, f_b, f_c, f_d)
    return g


class TestGBN():

    def test_get_joint_parameters(self, river_graph):
        mu, sigma = river_graph.get_joint_parameters()
        assert mu == MeansVector(
            [[3], [4], [9], [14]],
            names=['a', 'b', 'c', 'd'])
        assert sigma == CovarianceMatrix(
            [[4, 4, 8, 12],
             [4, 5, 8, 13],
             [8, 8, 20, 28],
             [12, 13, 28, 42]],
            names=['a', 'b', 'c', 'd'])

    def test_query(self, river_graph):
        result = river_graph.query(a=7)
        mu = result['joint']['mu']
        sigma = result['joint']['sigma']
        assert mu == MeansVector([
            [8],
            [17],
            [26]], names=['b', 'c', 'd'])
        assert sigma == CovarianceMatrix(
            [[1, 0, 1],
             [0, 4, 4],
             [1, 4, 6]], names=['b', 'c', 'd'])

        result = river_graph.query(a=7, c=17)
        mu = result['joint']['mu']
        sigma = result['joint']['sigma']
        assert mu == MeansVector([
            [8],
            [26]], names=['b', 'd'])
        assert sigma == CovarianceMatrix(
            [[1, 1],
             [1, 2]], names=['b', 'd'])

        result = river_graph.query(a=7, c=17, b=8)
        mu = result['joint']['mu']
        sigma = result['joint']['sigma']
        assert mu == MeansVector([
            [26]], names=['d'])
        assert sigma == CovarianceMatrix(
            [[1]], names=['d'])

    def test_assignment_of_joint_parameters(self, river_graph):
        assert river_graph.nodes['b'].func.joint_mu == MeansVector([
            [3],
            [4]], names=['a', 'b'])
        assert river_graph.nodes['b'].func.covariance_matrix == CovarianceMatrix([
            [4, 4],
            [4, 5]], names=['a', 'b'])

    def test_gaussian_pdf(self, river_graph):
        assert round(river_graph.nodes['a'].func(3), 4) == 0.1995
        assert round(river_graph.nodes['a'].func(10), 4) == 0.0002

    def test_multivariate_gaussian_pdf(self, river_graph):
        assert round(river_graph.nodes['d'].func(3, 1, 3), 4) == 0.0005
mit
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/visualization/data_providers/registry.py
1
5462
from galaxy.visualization.data_providers.basic import ColumnDataProvider from galaxy.visualization.data_providers import genome from galaxy.model import NoConverterException from galaxy.visualization.data_providers.phyloviz import PhylovizDataProvider from galaxy.datatypes.tabular import Tabular, Vcf from galaxy.datatypes.interval import Interval, ENCODEPeak, ChromatinInteractions, Gtf, Gff, Bed from galaxy.datatypes.xml import Phyloxml from galaxy.datatypes.data import Newick, Nexus class DataProviderRegistry( object ): """ Registry for data providers that enables listing and lookup. """ def __init__( self ): # Mapping from dataset type name to a class that can fetch data from a file of that # type. First key is converted dataset type; if result is another dict, second key # is original dataset type. self.dataset_type_name_to_data_provider = { "tabix": { Vcf: genome.VcfTabixDataProvider, Bed: genome.BedTabixDataProvider, Gtf: genome.GtfTabixDataProvider, ENCODEPeak: genome.ENCODEPeakTabixDataProvider, Interval: genome.IntervalTabixDataProvider, ChromatinInteractions: genome.ChromatinInteractionsTabixDataProvider, "default" : genome.TabixDataProvider }, "interval_index": genome.IntervalIndexDataProvider, "bai": genome.BamDataProvider, "bam": genome.SamDataProvider, "bigwig": genome.BigWigDataProvider, "bigbed": genome.BigBedDataProvider, "column_with_stats": ColumnDataProvider } def get_data_provider( self, trans, name=None, source='data', raw=False, original_dataset=None ): """ Returns data provider matching parameter values. For standalone data sources, source parameter is ignored. """ data_provider = None if raw: # Working with raw data. if isinstance( original_dataset.datatype, Gff ): data_provider_class = genome.RawGFFDataProvider elif isinstance( original_dataset.datatype, Bed ): data_provider_class = genome.RawBedDataProvider elif isinstance( original_dataset.datatype, Vcf ): data_provider_class = genome.RawVcfDataProvider elif isinstance( original_dataset.datatype, Tabular ): data_provider_class = ColumnDataProvider elif isinstance( original_dataset.datatype, ( Nexus, Newick, Phyloxml ) ): data_provider_class = PhylovizDataProvider data_provider = data_provider_class( original_dataset=original_dataset ) else: # Working with converted or standalone dataset. if name: # Provider requested by name; get from mappings. value = self.dataset_type_name_to_data_provider[ name ] if isinstance( value, dict ): # Get converter by dataset extension; if there is no data provider, # get the default. data_provider_class = value.get( original_dataset.datatype.__class__, value.get( "default" ) ) else: data_provider_class = value # If name is the same as original dataset's type, dataset is standalone. # Otherwise, a converted dataset is being used. if name == original_dataset.ext: data_provider = data_provider_class( original_dataset=original_dataset ) else: converted_dataset = original_dataset.get_converted_dataset( trans, name ) deps = original_dataset.get_converted_dataset_deps( trans, name ) data_provider = data_provider_class( original_dataset=original_dataset, converted_dataset=converted_dataset, dependencies=deps ) elif original_dataset: # No name, so look up a provider name from datatype's information. # Dataset must have data sources to get data. if not original_dataset.datatype.data_sources: return None # Get data provider mapping and data provider. 
data_provider_mapping = original_dataset.datatype.data_sources if 'data_standalone' in data_provider_mapping: data_provider = self.get_data_provider( trans, name=data_provider_mapping[ 'data_standalone' ], original_dataset=original_dataset ) else: source_list = data_provider_mapping[ source ] if isinstance( source_list, str ): source_list = [ source_list ] # Find a valid data provider in the source list. for source in source_list: try: data_provider = self.get_data_provider( trans, name=source, original_dataset=original_dataset ) break except NoConverterException: pass return data_provider
gpl-3.0
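The registry above resolves a provider class either from an explicit converted-dataset name or from the datatype's own data_sources mapping. A minimal sketch of a caller, assuming the import path matches the file's location and that `trans` and `hda` (a dataset whose datatype is BigWig) are supplied by the surrounding Galaxy code:

# Hypothetical caller; `trans` and `hda` are assumed to come from Galaxy itself.
from galaxy.visualization.data_providers.registry import DataProviderRegistry

registry = DataProviderRegistry()

# Explicit lookup by converted-dataset name; "bigwig" maps to genome.BigWigDataProvider.
provider = registry.get_data_provider(trans, name="bigwig", original_dataset=hda)

# Or let the registry consult hda.datatype.data_sources for the requested source.
provider = registry.get_data_provider(trans, source="data", original_dataset=hda)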
artur-shaik/qutebrowser
scripts/hostblock_blame.py
8
2305
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:

# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.

"""Check by which hostblock list a host was blocked."""

import sys
import io
import os
import os.path
import configparser
import urllib.request

from PyQt5.QtCore import QStandardPaths

sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))

from qutebrowser.browser import adblock


def main():
    """Check by which hostblock list a host was blocked."""
    if len(sys.argv) != 2:
        print("Usage: {} <host>".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    confdir = QStandardPaths.writableLocation(QStandardPaths.ConfigLocation)
    confdir = confdir.replace('/', os.sep)
    if confdir.split(os.sep)[-1] != 'qutebrowser':
        confdir = os.path.join(confdir, 'qutebrowser')
    confpath = os.path.join(confdir, 'qutebrowser.conf')
    parser = configparser.ConfigParser()
    print("config path: {}".format(confpath))
    successful = parser.read(confpath, encoding='utf-8')
    if not successful:
        raise OSError("configparser did not read files successfully!")
    lists = parser['content']['host-block-lists']
    for url in lists.split(','):
        print("checking {}...".format(url))
        raw_file = urllib.request.urlopen(url)
        byte_io = io.BytesIO(raw_file.read())
        f = adblock.get_fileobj(byte_io)
        for line in f:
            if sys.argv[1] in line:
                print("FOUND {} in {}:".format(sys.argv[1], url))
                print(" " + line.rstrip())


if __name__ == '__main__':
    main()
gpl-3.0
AIML/scikit-learn
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
218
3893
""" ============================================== Feature agglomeration vs. univariate selection ============================================== This example compares 2 dimensionality reduction strategies: - univariate feature selection with Anova - feature agglomeration with Ward hierarchical clustering Both methods are compared in a regression problem using a BayesianRidge as supervised estimator. """ # Author: Alexandre Gramfort <[email protected]> # License: BSD 3 clause print(__doc__) import shutil import tempfile import numpy as np import matplotlib.pyplot as plt from scipy import linalg, ndimage from sklearn.feature_extraction.image import grid_to_graph from sklearn import feature_selection from sklearn.cluster import FeatureAgglomeration from sklearn.linear_model import BayesianRidge from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.externals.joblib import Memory from sklearn.cross_validation import KFold ############################################################################### # Generate data n_samples = 200 size = 40 # image size roi_size = 15 snr = 5. np.random.seed(0) mask = np.ones([size, size], dtype=np.bool) coef = np.zeros((size, size)) coef[0:roi_size, 0:roi_size] = -1. coef[-roi_size:, -roi_size:] = 1. X = np.random.randn(n_samples, size ** 2) for x in X: # smooth data x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel() X -= X.mean(axis=0) X /= X.std(axis=0) y = np.dot(X, coef.ravel()) noise = np.random.randn(y.shape[0]) noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2) y += noise_coef * noise # add noise ############################################################################### # Compute the coefs of a Bayesian Ridge with GridSearch cv = KFold(len(y), 2) # cross-validation generator for model selection ridge = BayesianRidge() cachedir = tempfile.mkdtemp() mem = Memory(cachedir=cachedir, verbose=1) # Ward agglomeration followed by BayesianRidge connectivity = grid_to_graph(n_x=size, n_y=size) ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity, memory=mem) clf = Pipeline([('ward', ward), ('ridge', ridge)]) # Select the optimal number of parcels with grid search clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_agglomeration_ = coef_.reshape(size, size) # Anova univariate feature selection followed by BayesianRidge f_regression = mem.cache(feature_selection.f_regression) # caching function anova = feature_selection.SelectPercentile(f_regression) clf = Pipeline([('anova', anova), ('ridge', ridge)]) # Select the optimal percentage of features with grid search clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_selection_ = coef_.reshape(size, size) ############################################################################### # Inverse the transformation to plot the results on an image plt.close('all') plt.figure(figsize=(7.3, 2.7)) plt.subplot(1, 3, 1) plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("True weights") plt.subplot(1, 3, 2) plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Selection") plt.subplot(1, 3, 3) 
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Agglomeration") plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26) plt.show() # Attempt to remove the temporary cachedir, but don't worry if it fails shutil.rmtree(cachedir, ignore_errors=True)
bsd-3-clause
darisandi/geonode
geonode/maps/forms.py
18
1263
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

import autocomplete_light

from geonode.maps.models import Map
from geonode.base.forms import ResourceBaseForm


class MapForm(ResourceBaseForm):
    class Meta(ResourceBaseForm.Meta):
        model = Map
        exclude = ResourceBaseForm.Meta.exclude + (
            'zoom',
            'projection',
            'center_x',
            'center_y',
        )
        widgets = autocomplete_light.get_widgets_dict(Map)
gpl-3.0
napkindrawing/ansible
lib/ansible/modules/network/f5/bigip_hostname.py
26
5767
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2016 F5 Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = { 'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.0' } DOCUMENTATION = ''' --- module: bigip_hostname short_description: Manage the hostname of a BIG-IP. description: - Manage the hostname of a BIG-IP. version_added: "2.3" options: hostname: description: - Hostname of the BIG-IP host. required: True notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. extends_documentation_fragment: f5 requirements: - f5-sdk author: - Tim Rupp (@caphrim007) - Matthew Lam (@mryanlam) ''' EXAMPLES = ''' - name: Set the hostname of the BIG-IP bigip_hostname: hostname: "bigip.localhost.localdomain" password: "admin" server: "bigip.localhost.localdomain" user: "admin" delegate_to: localhost ''' RETURN = ''' hostname: description: The new hostname of the device returned: changed type: string sample: "big-ip01.internal" ''' from ansible.module_utils.f5_utils import ( AnsibleF5Client, AnsibleF5Parameters, HAS_F5SDK, F5ModuleError, iControlUnexpectedHTTPError ) class Parameters(AnsibleF5Parameters): api_attributes = ['hostname'] updatables = ['hostname'] returnables = ['hostname'] def to_return(self): result = {} for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) return result def api_params(self): result = {} for api_attribute in self.api_attributes: if self.api_map is not None and api_attribute in self.api_map: result[api_attribute] = getattr(self, self.api_map[api_attribute]) else: result[api_attribute] = getattr(self, api_attribute) result = self._filter_params(result) return result @property def hostname(self): if self._values['hostname'] is None: return None return str(self._values['hostname']) class ModuleManager(object): def __init__(self, client): self.client = client self.have = None self.want = Parameters(self.client.module.params) self.changes = Parameters() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = Parameters(changed) def _update_changed_options(self): changed = {} for key in Parameters.updatables: if getattr(self.want, key) is not None: attr1 = getattr(self.want, key) attr2 = getattr(self.have, key) if attr1 != attr2: changed[key] = attr1 self.changes = Parameters(changed) if changed: return True return False def exec_module(self): result = dict() try: changed = self.update() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def read_current_from_device(self): resource = self.client.api.tm.sys.global_settings.load() result = resource.attrs return 
Parameters(result) def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.client.check_mode: return True self.update_on_device() return True def should_update(self): result = self._update_changed_options() if result: return True return False def update_on_device(self): params = self.want.api_params() resource = self.client.api.tm.sys.global_settings.load() resource.modify(**params) self.client.api.tm.cm.devices.exec_cmd( 'mv', name=self.have.hostname, target=self.want.hostname ) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True self.argument_spec = dict( hostname=dict( required=True, default=None, type='str' ) ) self.f5_product_name = 'bigip' def main(): if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") spec = ArgumentSpec() client = AnsibleF5Client( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, f5_product_name=spec.f5_product_name ) try: mm = ModuleManager(client) results = mm.exec_module() client.module.exit_json(**results) except F5ModuleError as e: client.module.fail_json(msg=str(e)) if __name__ == '__main__': main()
gpl-3.0
Toshakins/wagtail
wagtail/wagtailsnippets/views/snippets.py
2
8601
from __future__ import absolute_import, unicode_literals from django.apps import apps from django.core.urlresolvers import reverse from django.http import Http404 from django.shortcuts import get_object_or_404, redirect, render from django.utils.text import capfirst from django.utils.translation import ugettext as _ from wagtail.utils.pagination import paginate from wagtail.wagtailadmin import messages from wagtail.wagtailadmin.edit_handlers import ( ObjectList, extract_panel_definitions_from_model_class) from wagtail.wagtailadmin.forms import SearchForm from wagtail.wagtailadmin.utils import permission_denied from wagtail.wagtailsearch.backends import get_search_backend from wagtail.wagtailsearch.index import class_is_indexed from wagtail.wagtailsnippets.models import get_snippet_models from wagtail.wagtailsnippets.permissions import get_permission_name, user_can_edit_snippet_type # == Helper functions == def get_snippet_model_from_url_params(app_name, model_name): """ Retrieve a model from an app_label / model_name combo. Raise Http404 if the model is not a valid snippet type. """ try: model = apps.get_model(app_name, model_name) except LookupError: raise Http404 if model not in get_snippet_models(): # don't allow people to hack the URL to edit content types that aren't registered as snippets raise Http404 return model SNIPPET_EDIT_HANDLERS = {} def get_snippet_edit_handler(model): if model not in SNIPPET_EDIT_HANDLERS: if hasattr(model, 'edit_handler'): # use the edit handler specified on the page class edit_handler = model.edit_handler else: panels = extract_panel_definitions_from_model_class(model) edit_handler = ObjectList(panels) SNIPPET_EDIT_HANDLERS[model] = edit_handler.bind_to_model(model) return SNIPPET_EDIT_HANDLERS[model] # == Views == def index(request): snippet_model_opts = [ model._meta for model in get_snippet_models() if user_can_edit_snippet_type(request.user, model)] return render(request, 'wagtailsnippets/snippets/index.html', { 'snippet_model_opts': sorted( snippet_model_opts, key=lambda x: x.verbose_name.lower())}) def list(request, app_label, model_name): model = get_snippet_model_from_url_params(app_label, model_name) permissions = [ get_permission_name(action, model) for action in ['add', 'change', 'delete'] ] if not any([request.user.has_perm(perm) for perm in permissions]): return permission_denied(request) items = model.objects.all() # Preserve the snippet's model-level ordering if specified, but fall back on PK if not # (to ensure pagination is consistent) if not items.ordered: items = items.order_by('pk') # Search is_searchable = class_is_indexed(model) is_searching = False search_query = None if is_searchable and 'q' in request.GET: search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % { 'snippet_type_name': model._meta.verbose_name_plural }) if search_form.is_valid(): search_query = search_form.cleaned_data['q'] search_backend = get_search_backend() items = search_backend.search(search_query, items) is_searching = True else: search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % { 'snippet_type_name': model._meta.verbose_name_plural }) paginator, paginated_items = paginate(request, items) # Template if request.is_ajax(): template = 'wagtailsnippets/snippets/results.html' else: template = 'wagtailsnippets/snippets/type_index.html' return render(request, template, { 'model_opts': model._meta, 'items': paginated_items, 'can_add_snippet': request.user.has_perm(get_permission_name('add', model)), 
'is_searchable': is_searchable, 'search_form': search_form, 'is_searching': is_searching, 'query_string': search_query, }) def create(request, app_label, model_name): model = get_snippet_model_from_url_params(app_label, model_name) permission = get_permission_name('add', model) if not request.user.has_perm(permission): return permission_denied(request) instance = model() edit_handler_class = get_snippet_edit_handler(model) form_class = edit_handler_class.get_form_class(model) if request.method == 'POST': form = form_class(request.POST, request.FILES, instance=instance) if form.is_valid(): form.save() messages.success( request, _("{snippet_type} '{instance}' created.").format( snippet_type=capfirst(model._meta.verbose_name), instance=instance ), buttons=[ messages.button(reverse( 'wagtailsnippets:edit', args=(app_label, model_name, instance.id) ), _('Edit')) ] ) return redirect('wagtailsnippets:list', app_label, model_name) else: messages.error(request, _("The snippet could not be created due to errors.")) edit_handler = edit_handler_class(instance=instance, form=form) else: form = form_class(instance=instance) edit_handler = edit_handler_class(instance=instance, form=form) return render(request, 'wagtailsnippets/snippets/create.html', { 'model_opts': model._meta, 'edit_handler': edit_handler, 'form': form, }) def edit(request, app_label, model_name, id): model = get_snippet_model_from_url_params(app_label, model_name) permission = get_permission_name('change', model) if not request.user.has_perm(permission): return permission_denied(request) instance = get_object_or_404(model, id=id) edit_handler_class = get_snippet_edit_handler(model) form_class = edit_handler_class.get_form_class(model) if request.method == 'POST': form = form_class(request.POST, request.FILES, instance=instance) if form.is_valid(): form.save() messages.success( request, _("{snippet_type} '{instance}' updated.").format( snippet_type=capfirst(model._meta.verbose_name_plural), instance=instance ), buttons=[ messages.button(reverse( 'wagtailsnippets:edit', args=(app_label, model_name, instance.id) ), _('Edit')) ] ) return redirect('wagtailsnippets:list', app_label, model_name) else: messages.error(request, _("The snippet could not be saved due to errors.")) edit_handler = edit_handler_class(instance=instance, form=form) else: form = form_class(instance=instance) edit_handler = edit_handler_class(instance=instance, form=form) return render(request, 'wagtailsnippets/snippets/edit.html', { 'model_opts': model._meta, 'instance': instance, 'edit_handler': edit_handler, 'form': form, }) def delete(request, app_label, model_name, id): model = get_snippet_model_from_url_params(app_label, model_name) permission = get_permission_name('delete', model) if not request.user.has_perm(permission): return permission_denied(request) instance = get_object_or_404(model, id=id) if request.method == 'POST': instance.delete() messages.success( request, _("{snippet_type} '{instance}' deleted.").format( snippet_type=capfirst(model._meta.verbose_name_plural), instance=instance ) ) return redirect('wagtailsnippets:list', app_label, model_name) return render(request, 'wagtailsnippets/snippets/confirm_delete.html', { 'model_opts': model._meta, 'instance': instance, }) def usage(request, app_label, model_name, id): model = get_snippet_model_from_url_params(app_label, model_name) instance = get_object_or_404(model, id=id) paginator, used_by = paginate(request, instance.get_usage()) return render(request, "wagtailsnippets/snippets/usage.html", { 
'instance': instance, 'used_by': used_by })
bsd-3-clause
germanovm/vdsm
vdsm/gluster/storagedev.py
1
13988
# # Copyright 2015 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # import errno import logging import os import blivet import blivet.formats import blivet.formats.fs import blivet.size from blivet.devices import LVMVolumeGroupDevice from blivet.devices import LVMThinPoolDevice from blivet.devices import LVMLogicalVolumeDevice from blivet.devices import LVMThinLogicalVolumeDevice from blivet import udev from vdsm import utils import fstab import exception as ge from . import makePublic log = logging.getLogger("Gluster") _pvCreateCommandPath = utils.CommandPath("pvcreate", "/sbin/pvcreate", "/usr/sbin/pvcreate",) _vgCreateCommandPath = utils.CommandPath("vgcreate", "/sbin/vgcreate", "/usr/sbin/vgcreate",) _lvconvertCommandPath = utils.CommandPath("lvconvert", "/sbin/lvconvert", "/usr/sbin/lvconvert",) _lvchangeCommandPath = utils.CommandPath("lvchange", "/sbin/lvchange", "/usr/sbin/lvchange",) _vgscanCommandPath = utils.CommandPath("vgscan", "/sbin/vgscan", "/usr/sbin/vgscan",) # All size are in MiB unless otherwise specified DEFAULT_CHUNK_SIZE_KB = 256 DEFAULT_METADATA_SIZE_KB = 16777216 MIN_VG_SIZE = 1048576 MIN_METADATA_PERCENT = 0.005 DEFAULT_FS_TYPE = "xfs" DEFAULT_MOUNT_OPTIONS = "inode64,noatime" def _getDeviceDict(device, createBrick=False): info = {'name': device.name, 'devPath': device.path, 'devUuid': device.uuid or '', 'bus': device.bus or '', 'model': '', 'fsType': '', 'mountPoint': '', 'uuid': '', 'createBrick': createBrick} if isinstance(device.size, blivet.size.Size): info['size'] = '%s' % device.size.convertTo(spec="MiB") else: info['size'] = '%s' % device.size if not info['bus'] and device.parents: info['bus'] = device.parents[0].bus if device.model: info['model'] = "%s (%s)" % (device.model, device.type) else: info['model'] = device.type if device.format: info['uuid'] = device.format.uuid or '' # lvm vg will not have sysfs path if hasattr(udev, 'get_device'): dev = udev.get_device(device.sysfsPath) or {} elif hasattr(udev, 'udev_get_device'): dev = udev.udev_get_device(device.sysfsPath) or {} else: dev = {} info['fsType'] = device.format.type or dev.get('ID_FS_TYPE', '') if hasattr(device.format, 'mountpoint'): info['mountPoint'] = device.format.mountpoint or '' return info def _parseDevices(devices): deviceList = [] for device in devices: deviceList.append(_getDeviceDict(device, _canCreateBrick(device))) return deviceList def _canCreateBrick(device): if not device or device.kids > 0 or device.format.type or \ hasattr(device.format, 'mountpoint') or \ device.type in ['cdrom', 'lvmvg', 'lvmthinpool', 'lvmlv', 'lvmthinlv']: return False return True def _reset_blivet(blivetEnv): try: blivetEnv.reset() except (blivet.errors.UnusableConfigurationError, blivet.errors.StorageError) as e: log.error("Error: %s" % e.message) @makePublic def 
storageDevicesList(): blivetEnv = blivet.Blivet() _reset_blivet(blivetEnv) return _parseDevices(blivetEnv.devices) @makePublic def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE, raidParams={}): def _getDeviceList(devNameList): return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1]) for devName in devNameList] def _makePartition(deviceList): pvDeviceList = [] doPartitioning = False for dev in deviceList: if dev.type not in ['disk', 'dm-multipath']: pvDeviceList.append(dev) else: blivetEnv.initializeDisk(dev) part = blivetEnv.newPartition(fmt_type="lvmpv", grow=True, parents=[dev]) blivetEnv.createDevice(part) pvDeviceList.append(part) doPartitioning = True if doPartitioning: blivet.partitioning.doPartitioning(blivetEnv) return pvDeviceList def _createPV(deviceList, alignment=0): def _createAlignedPV(deviceList, alignment): for dev in deviceList: # bz#1178705: Blivet always creates pv with 1MB dataalignment # Workaround: Till blivet fixes the issue, we use lvm pvcreate rc, out, err = utils.execCmd([_pvCreateCommandPath.cmd, '--dataalignment', '%sk' % alignment, dev.path]) if rc: raise ge.GlusterHostStorageDevicePVCreateFailedException( dev.path, alignment, rc, out, err) _reset_blivet(blivetEnv) return _getDeviceList([dev.name for dev in deviceList]) if alignment: blivetEnv.doIt() return _createAlignedPV(deviceList, alignment) for dev in deviceList: lvmpv = blivet.formats.getFormat("lvmpv", device=dev.path) blivetEnv.formatDevice(dev, lvmpv) blivet.partitioning.doPartitioning(blivetEnv) return deviceList def _createVG(vgName, deviceList, stripeSize=0): if stripeSize: # bz#1198568: Blivet always creates vg with 1MB stripe size # Workaround: Till blivet fixes the issue, use vgcreate command devices = ','.join([device.path for device in deviceList]) rc, out, err = utils.execCmd([_vgCreateCommandPath.cmd, '-s', '%sk' % stripeSize, vgName, devices]) if rc: raise ge.GlusterHostStorageDeviceVGCreateFailedException( vgName, devices, stripeSize, rc, out, err) blivetEnv.reset() vg = blivetEnv.devicetree.getDeviceByName(vgName) else: vg = LVMVolumeGroupDevice(vgName, parents=deviceList) blivetEnv.createDevice(vg) return vg def _createThinPool(poolName, vg, alignment=0, poolMetaDataSize=0, poolDataSize=0): if not alignment: # bz#1180228: blivet doesn't handle percentage-based sizes properly # Workaround: Till the bz gets fixed, we take only 99% size from vg pool = LVMThinPoolDevice(poolName, parents=[vg], size=(vg.size * 99 / 100), grow=True) blivetEnv.createDevice(pool) return pool else: metaName = "meta-%s" % poolName vgPoolName = "%s/%s" % (vg.name, poolName) metaLv = LVMLogicalVolumeDevice( metaName, parents=[vg], size=blivet.size.Size('%d KiB' % poolMetaDataSize)) poolLv = LVMLogicalVolumeDevice( poolName, parents=[vg], size=blivet.size.Size('%d KiB' % poolDataSize)) blivetEnv.createDevice(metaLv) blivetEnv.createDevice(poolLv) blivetEnv.doIt() # bz#1100514: LVM2 currently only supports physical extent sizes # that are a power of 2. Till that support is available we need # to use lvconvert to achive that. # bz#1179826: blivet doesn't support lvconvert functionality. 
# Workaround: Till the bz gets fixed, lvconvert command is used rc, out, err = utils.execCmd([_lvconvertCommandPath.cmd, '--chunksize', '%sK' % alignment, '--thinpool', vgPoolName, '--poolmetadata', "%s/%s" % (vg.name, metaName), '--poolmetadataspar', 'n', '-y']) if rc: raise ge.GlusterHostStorageDeviceLVConvertFailedException( vg.path, alignment, rc, out, err) rc, out, err = utils.execCmd([_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName]) if rc: raise ge.GlusterHostStorageDeviceLVChangeFailedException( vgPoolName, rc, out, err) _reset_blivet(blivetEnv) return blivetEnv.devicetree.getDeviceByName(poolLv.name) if os.path.ismount(mountPoint): raise ge.GlusterHostStorageMountPointInUseException(mountPoint) vgName = "vg-" + brickName poolName = "pool-" + brickName alignment = 0 chunkSize = 0 poolDataSize = 0 count = 0 metaDataSize = DEFAULT_METADATA_SIZE_KB if raidParams.get('type') == '6': count = raidParams['pdCount'] - 2 alignment = raidParams['stripeSize'] * count chunkSize = alignment elif raidParams.get('type') == '10': count = raidParams['pdCount'] / 2 alignment = raidParams['stripeSize'] * count chunkSize = DEFAULT_CHUNK_SIZE_KB blivetEnv = blivet.Blivet() _reset_blivet(blivetEnv) # get the devices list from the device name deviceList = _getDeviceList(devNameList) # raise an error when any device not actually found in the given list notFoundList = set(devNameList).difference( set([dev.name for dev in deviceList])) if notFoundList: raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList) # raise an error when any device is used already in the given list inUseList = set(devNameList).difference(set([not _canCreateBrick( dev) or dev.name for dev in deviceList])) if inUseList: raise ge.GlusterHostStorageDeviceInUseException(inUseList) pvDeviceList = _makePartition(deviceList) pvDeviceList = _createPV(pvDeviceList, alignment) vg = _createVG(vgName, pvDeviceList, raidParams.get('stripeSize', 0)) # The following calculation is based on the redhat storage performance doc # http://docbuilder.usersys.redhat.com/22522 # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance # create ~16GB metadata LV (metaDataSize) that has a size which is # a multiple of RAID stripe width if it is > minimum vg size # otherwise allocate a minimum of 0.5% of the data device size # and create data LV (poolDataSize) that has a size which is # a multiple of stripe width if alignment: vgSizeKib = int(vg.size.convertTo(spec="KiB")) if vg.size.convertTo(spec='MiB') < MIN_VG_SIZE: metaDataSize = vgSizeKib * MIN_METADATA_PERCENT poolDataSize = vgSizeKib - metaDataSize metaDataSize = (metaDataSize - (metaDataSize % alignment)) poolDataSize = (poolDataSize - (poolDataSize % alignment)) # Creating a thin pool from the data LV and the metadata LV # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool # --poolmetadata VOLGROUP/metadata_device_name pool = _createThinPool(poolName, vg, chunkSize, metaDataSize, poolDataSize) thinlv = LVMThinLogicalVolumeDevice(brickName, parents=[pool], size=pool.size, grow=True) blivetEnv.createDevice(thinlv) blivetEnv.doIt() if fsType != DEFAULT_FS_TYPE: log.error("fstype %s is currently unsupported" % fsType) raise ge.GlusterHostStorageDeviceMkfsFailedException( thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType) format = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path) format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"] if raidParams.get('type') == '6': format._defaultFormatOptions += ["-d", "sw=%s,su=%sk" % ( count, 
raidParams.get('stripeSize'))] blivetEnv.formatDevice(thinlv, format) blivetEnv.doIt() try: os.makedirs(mountPoint) except OSError as e: if errno.EEXIST != e.errno: errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename) raise ge.GlusterHostStorageDeviceMakeDirsFailedException( err=[errMsg]) thinlv.format.setup(mountpoint=mountPoint) blivetEnv.doIt() # bz#1230495: lvm devices are invisible and appears only after vgscan # Workaround: Till the bz gets fixed, We use vgscan to refresh LVM devices rc, out, err = utils.execCmd([_vgscanCommandPath.cmd]) if rc: raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err) fstab.FsTab().add(thinlv.path, mountPoint, DEFAULT_FS_TYPE) return _getDeviceDict(thinlv)
gpl-2.0
pgjones/jinja
tests/test_security.py
23
6015
# -*- coding: utf-8 -*- """ jinja2.testsuite.security ~~~~~~~~~~~~~~~~~~~~~~~~~ Checks the sandbox and other security features. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import pytest from jinja2 import Environment from jinja2.sandbox import SandboxedEnvironment, \ ImmutableSandboxedEnvironment, unsafe from jinja2 import Markup, escape from jinja2.exceptions import SecurityError, TemplateSyntaxError, \ TemplateRuntimeError from jinja2._compat import text_type class PrivateStuff(object): def bar(self): return 23 @unsafe def foo(self): return 42 def __repr__(self): return 'PrivateStuff' class PublicStuff(object): bar = lambda self: 23 _foo = lambda self: 42 def __repr__(self): return 'PublicStuff' @pytest.mark.sandbox class TestSandbox(): def test_unsafe(self, env): env = SandboxedEnvironment() pytest.raises(SecurityError, env.from_string("{{ foo.foo() }}").render, foo=PrivateStuff()) assert env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()) == '23' pytest.raises(SecurityError, env.from_string("{{ foo._foo() }}").render, foo=PublicStuff()) assert env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()) == '23' assert env.from_string("{{ foo.__class__ }}").render(foo=42) == '' assert env.from_string("{{ foo.func_code }}").render(foo=lambda:None) == '' # security error comes from __class__ already. pytest.raises(SecurityError, env.from_string( "{{ foo.__class__.__subclasses__() }}").render, foo=42) def test_immutable_environment(self, env): env = ImmutableSandboxedEnvironment() pytest.raises(SecurityError, env.from_string( '{{ [].append(23) }}').render) pytest.raises(SecurityError, env.from_string( '{{ {1:2}.clear() }}').render) def test_restricted(self, env): env = SandboxedEnvironment() pytest.raises(TemplateSyntaxError, env.from_string, "{% for item.attribute in seq %}...{% endfor %}") pytest.raises(TemplateSyntaxError, env.from_string, "{% for foo, bar.baz in seq %}...{% endfor %}") def test_markup_operations(self, env): # adding two strings should escape the unsafe one unsafe = '<script type="application/x-some-script">alert("foo");</script>' safe = Markup('<em>username</em>') assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe) # string interpolations are safe to use too assert Markup('<em>%s</em>') % '<bad user>' == \ '<em>&lt;bad user&gt;</em>' assert Markup('<em>%(username)s</em>') % { 'username': '<bad user>' } == '<em>&lt;bad user&gt;</em>' # an escaped object is markup too assert type(Markup('foo') + 'bar') is Markup # and it implements __html__ by returning itself x = Markup("foo") assert x.__html__() is x # it also knows how to treat __html__ objects class Foo(object): def __html__(self): return '<em>awesome</em>' def __unicode__(self): return 'awesome' assert Markup(Foo()) == '<em>awesome</em>' assert Markup('<strong>%s</strong>') % Foo() == \ '<strong><em>awesome</em></strong>' # escaping and unescaping assert escape('"<>&\'') == '&#34;&lt;&gt;&amp;&#39;' assert Markup("<em>Foo &amp; Bar</em>").striptags() == "Foo & Bar" assert Markup("&lt;test&gt;").unescape() == "<test>" def test_template_data(self, env): env = Environment(autoescape=True) t = env.from_string('{% macro say_hello(name) %}' '<p>Hello {{ name }}!</p>{% endmacro %}' '{{ say_hello("<blink>foo</blink>") }}') escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>' assert t.render() == escaped_out assert text_type(t.module) == escaped_out assert escape(t.module) == escaped_out assert t.module.say_hello('<blink>foo</blink>') == 
escaped_out assert escape(t.module.say_hello('<blink>foo</blink>')) == escaped_out def test_attr_filter(self, env): env = SandboxedEnvironment() tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}') pytest.raises(SecurityError, tmpl.render, cls=int) def test_binary_operator_intercepting(self, env): def disable_op(left, right): raise TemplateRuntimeError('that operator so does not work') for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'): env = SandboxedEnvironment() env.binop_table['+'] = disable_op t = env.from_string('{{ %s }}' % expr) assert t.render(ctx) == rv env.intercepted_binops = frozenset(['+']) t = env.from_string('{{ %s }}' % expr) try: t.render(ctx) except TemplateRuntimeError as e: pass else: assert False, 'expected runtime error' def test_unary_operator_intercepting(self, env): def disable_op(arg): raise TemplateRuntimeError('that operator so does not work') for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'): env = SandboxedEnvironment() env.unop_table['-'] = disable_op t = env.from_string('{{ %s }}' % expr) assert t.render(ctx) == rv env.intercepted_unops = frozenset(['-']) t = env.from_string('{{ %s }}' % expr) try: t.render(ctx) except TemplateRuntimeError as e: pass else: assert False, 'expected runtime error'
bsd-3-clause
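The sandbox tests above always go through env.from_string(...).render(...); the same pattern as a standalone sketch (names and the rendered template are illustrative), showing the SecurityError the tests expect:

from jinja2.sandbox import SandboxedEnvironment
from jinja2.exceptions import SecurityError

env = SandboxedEnvironment()
try:
    # Reaching internals through __class__ is rejected by the sandbox.
    env.from_string("{{ foo.__class__.__subclasses__() }}").render(foo=42)
except SecurityError:
    print("blocked by the sandbox")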
magfest/ubersystem
alembic/versions/e1d3c11eb9dd_add_adult_panels_and_tables.py
1
1928
"""Add adult panels and tables Revision ID: e1d3c11eb9dd Revises: 1f862611ba04 Create Date: 2018-06-21 23:06:32.678061 """ # revision identifiers, used by Alembic. revision = 'e1d3c11eb9dd' down_revision = '1f862611ba04' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa try: is_sqlite = op.get_context().dialect.name == 'sqlite' except Exception: is_sqlite = False if is_sqlite: op.get_context().connection.execute('PRAGMA foreign_keys=ON;') utcnow_server_default = "(datetime('now', 'utc'))" else: utcnow_server_default = "timezone('utc', current_timestamp)" def sqlite_column_reflect_listener(inspector, table, column_info): """Adds parenthesis around SQLite datetime defaults for utcnow.""" if column_info['default'] == "datetime('now', 'utc')": column_info['default'] = utcnow_server_default sqlite_reflect_kwargs = { 'listeners': [('column_reflect', sqlite_column_reflect_listener)] } # =========================================================================== # HOWTO: Handle alter statements in SQLite # # def upgrade(): # if is_sqlite: # with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: # batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False) # else: # op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False) # # =========================================================================== def upgrade(): op.add_column('art_show_application', sa.Column('panels_ad', sa.Integer(), server_default='0', nullable=False)) op.add_column('art_show_application', sa.Column('tables_ad', sa.Integer(), server_default='0', nullable=False)) def downgrade(): op.drop_column('art_show_application', 'tables_ad') op.drop_column('art_show_application', 'panels_ad')
agpl-3.0
jspan/Open-Knesset
laws/forms.py
8
8038
from django import forms from django.db.models import Q from django.utils.translation import ugettext_lazy as _ from datetime import date from tagging.models import Tag from models import (Vote, Bill, KnessetProposal, BillBudgetEstimation, CONVERT_TO_DISCUSSION_HEADERS) from vote_choices import (ORDER_CHOICES, TAGGED_CHOICES, TYPE_CHOICES, SIMPLE_TYPE_CHOICES, BILL_TAGGED_CHOICES, BILL_STAGE_CHOICES, BILL_AGRR_STAGES) STAGE_CHOICES = ( ('all', _('All')), ) LINK_ERRORS = { 'DUP_FIRST': _('Bill already has a First Vote linked to it'), 'DUP_APPROVE': _('Bill already has an Approval Vote linked to it'), 'ALREADY_LINKED': _('Vote is already linked as Approval Vote of another bill'), } class AttachBillFromVoteForm(forms.Form): """Form for attaching a vote to a bill from the vote page.""" vote_model = forms.ModelChoiceField(queryset=Vote.objects.all(), widget=forms.HiddenInput, required=True) vote_type = forms.ChoiceField(label=_('Vote Type'), choices=SIMPLE_TYPE_CHOICES, required=True) bill_model = forms.ModelChoiceField(label=_('Bill'), queryset=Bill.objects.all(), widget=forms.TextInput, required=True) def __init__(self, vote, *args, **kwargs): super(AttachBillFromVoteForm, self).__init__(*args, **kwargs) self.fields['vote_model'].initial = vote self.fields['vote_type'].initial = self.get_default_vote_type(vote) def clean(self): cleaned_data = super(AttachBillFromVoteForm, self).clean() vote_type = cleaned_data.get('vote_type') bill = cleaned_data.get('bill_model') if vote_type == 'first vote' and bill.first_vote is not None: raise forms.ValidationError( LINK_ERRORS['DUP_FIRST'], code="cannot-link") elif vote_type == 'approve vote': if bill.approval_vote is not None: raise forms.ValidationError( LINK_ERRORS['DUP_APPROVE'], code="cannot-link") vote = cleaned_data.get('vote_model') vote_already_linked = Bill.objects\ .filter(approval_vote=vote).count() > 0 if vote_already_linked: raise forms.ValidationError( LINK_ERRORS['ALREADY_LINKED'], code="cannot-link") return cleaned_data def get_default_vote_type(self, vote): for h in CONVERT_TO_DISCUSSION_HEADERS: if vote.title.find(h) >= 0: return 'pre vote' if vote.vote_type == 'law-approve': return 'approve vote' return None class BudgetEstimateForm(forms.Form): """Form for submitting the budget estimation of a given bill, for a few types of budget.""" be_one_time_gov = forms.IntegerField(label=_('One-time costs to government'), required=False) be_yearly_gov = forms.IntegerField(label=_('Yearly costs to government'), required=False) be_one_time_ext = forms.IntegerField(label=_('One-time costs to external bodies'), required=False) be_yearly_ext = forms.IntegerField(label=_('Yearly costs to external bodies'), required=False) be_summary = forms.CharField(label=_('Summary of the estimation'),widget=forms.Textarea,required=False) def __init__(self, bill, user, *args, **kwargs): super(BudgetEstimateForm, self).__init__(*args, **kwargs) if bill is not None and user is not None: try: be = BillBudgetEstimation.objects.get(bill=bill,estimator__username=str(user)) self.fields['be_one_time_gov'].initial = be.one_time_gov self.fields['be_yearly_gov'].initial = be.yearly_gov self.fields['be_one_time_ext'].initial = be.one_time_ext self.fields['be_yearly_ext'].initial = be.yearly_ext self.fields['be_summary'].initial = be.summary except BillBudgetEstimation.DoesNotExist: pass #self.fields['tagged'].choices = new_choices class VoteSelectForm(forms.Form): """Votes filtering form""" vtype = forms.ChoiceField(label=_('Vote types'), choices=TYPE_CHOICES, required=False, 
initial='all') tagged = forms.ChoiceField(label=_('Tags'), choices=TAGGED_CHOICES, required=False, initial='all') order = forms.ChoiceField(label=_('Order by'), choices=ORDER_CHOICES, required=False, initial='time') from_date = forms.DateField(label=_('From date'), required=False) to_date = forms.DateField(label=_('To date'), required=False, initial=date.today) exclude_user_agendas = forms.BooleanField(label=_('Exclude my agendas'), required=False, initial=False) exclude_ascribed = forms.BooleanField( label=_('Exclude votes ascribed to bills'), required=False, initial=False) def __init__(self, *args, **kwargs): super(VoteSelectForm, self).__init__(*args, **kwargs) tags = Tag.objects.usage_for_model(Vote) new_choices = list(TAGGED_CHOICES) new_choices.extend([(t.name, t.name) for t in tags]) self.fields['tagged'].choices = new_choices class BillSelectForm(forms.Form): """Bill filtering form""" stage = forms.ChoiceField(label=_('Bill Stage'), choices=BILL_STAGE_CHOICES, required=False, initial='all') tagged = forms.ChoiceField(label=_('Tags'), choices=BILL_TAGGED_CHOICES, required=False, initial='all') changed_after = forms.DateField(label=_('Stage Changed After:'), required=False, input_formats=["%d/%m/%Y", "%d/%m/%y"]) changed_before = forms.DateField(label=_('Stage Chaged Before:'), required=False, input_formats=["%d/%m/%Y", "%d/%m/%y"]) pp_id = forms.IntegerField(required=False, label=_('Private proposal ID')) knesset_booklet = forms.IntegerField(required=False, label=_('Knesset booklet')) gov_booklet = forms.IntegerField(required=False, label=_('Government booklet')) # TODO: add more filter options: # order = forms.ChoiceField(label=_('Order by'), choices=ORDER_CHOICES, # required=False, initial='time') # from_date = forms.DateField(label=_('From date'), required=False) # to_date = forms.DateField(label=_('To date'), required=False, # initial=date.today) def __init__(self, *args, **kwargs): super(BillSelectForm, self).__init__(*args, **kwargs) tags = Tag.objects.usage_for_model(Bill) new_choices = list(BILL_TAGGED_CHOICES) new_choices.extend([(t.name, t.name) for t in tags]) self.fields['tagged'].choices = new_choices new_stages = list(STAGE_CHOICES) new_stages.extend(BILL_STAGE_CHOICES) self.fields['stage'].choices = new_stages def clean(self): super(BillSelectForm, self).clean() #override stage error on aggregate stages (when accessing from mk page) if ((self.data.get('stage') in BILL_AGRR_STAGES) and ('stage' in self._errors)): del self._errors['stage'] self.cleaned_data['stage'] = self.data.get('stage') return self.cleaned_data
bsd-3-clause
edx/ecommerce
ecommerce/extensions/api/v2/views/refunds.py
1
6962
"""HTTP endpoints for interacting with refunds.""" import logging from django.contrib.auth import get_user_model from django.db import transaction from django.utils.decorators import method_decorator from oscar.core.loading import get_model from rest_framework import generics, status from rest_framework.exceptions import ParseError from rest_framework.permissions import IsAdminUser, IsAuthenticated from rest_framework.response import Response from ecommerce.core.exceptions import MissingLmsUserIdException from ecommerce.extensions.api import serializers from ecommerce.extensions.api.exceptions import BadRequestException from ecommerce.extensions.api.permissions import CanActForUser from ecommerce.extensions.refund.api import ( create_refunds, create_refunds_for_entitlement, find_orders_associated_with_course ) Order = get_model('order', 'Order') OrderLine = get_model('order', 'Line') Refund = get_model('refund', 'Refund') User = get_user_model() logger = logging.getLogger(__name__) class RefundCreateView(generics.CreateAPIView): """Creates refunds. Given a username and course ID or an order number and a course entitlement, this view finds and creates a refund for each order matching the following criteria: * Order was placed by the User linked to username. * Order is in the COMPLETE state. * Order has at least one line item associated with the course ID or Course Entitlement. Note that only the line items associated with the course ID will be refunded. Items associated with a different course ID, or not associated with any course ID, will NOT be refunded. With the exception of superusers, users may only create refunds for themselves. Attempts to create refunds for other users will fail with HTTP 403. If refunds are created, a list of the refund IDs will be returned along with HTTP 201. If no refunds are created, HTTP 200 will be returned. """ permission_classes = (IsAuthenticated, CanActForUser) def get_serializer(self, *args, **kwargs): return None def create(self, request, *args, **kwargs): """ Creates refunds, if eligible orders exist. This supports creating refunds for both course runs as well as course entitlements. Arguments: username (string): This is required by both types of refund course_run refund: course_id (string): The course_id for which to refund for the given user course_entitlement refund: order_number (string): The order for which to refund the course entitlement entitlement_uuid (string): The UUID for the course entitlement for the given order to refund Returns: refunds (list): List of refunds created Side effects: If the given user does not have an LMS user id, tries to find it. If found, adds the id to the user and saves the user. If the id cannot be found, writes custom metrics to record this fact. """ course_id = request.data.get('course_id') username = request.data.get('username') order_number = request.data.get('order_number') entitlement_uuid = request.data.get('entitlement_uuid') refunds = [] # We should always have a username value as long as CanActForUser is in place. 
if not username: # pragma: no cover raise BadRequestException('No username specified.') try: user = User.objects.get(username=username) except User.DoesNotExist: raise BadRequestException('User "{}" does not exist.'.format(username)) # Ensure the user has an LMS user id try: if request.user.is_authenticated: requested_by = request.user.id else: # pragma: no cover requested_by = None called_from = u'refund processing for user {user_id} requested by {requested_by}'.format( user_id=user.id, requested_by=requested_by) user.add_lms_user_id('ecommerce_missing_lms_user_id_refund', called_from) except MissingLmsUserIdException: raise BadRequestException('User {} does not have an LMS user id.'.format(user.id)) # Try and create a refund for the passed in order if entitlement_uuid: try: order = user.orders.get(number=order_number) refunds = create_refunds_for_entitlement(order, entitlement_uuid) except (Order.DoesNotExist, OrderLine.DoesNotExist): raise BadRequestException('Order {} does not exist.'.format(order_number)) else: if not course_id: raise BadRequestException('No course_id specified.') # We can only create refunds if the user has orders. if user.orders.exists(): orders = find_orders_associated_with_course(user, course_id) refunds = create_refunds(orders, course_id) # Return HTTP 201 if we created refunds. if refunds: refund_ids = [refund.id for refund in refunds] return Response(refund_ids, status=status.HTTP_201_CREATED) # Return HTTP 200 if we did NOT create refunds. return Response([], status=status.HTTP_200_OK) @method_decorator(transaction.non_atomic_requests, name='dispatch') class RefundProcessView(generics.UpdateAPIView): """Process--approve or deny--refunds. This view can be used to approve, or deny, a Refund. Under normal conditions, the view returns HTTP status 200 and a serialized Refund. In the event of an error, the view will still return a serialized Refund (to reflect any changed statuses); however, HTTP status will be 500. Only staff users are permitted to use this view. """ permission_classes = (IsAuthenticated, IsAdminUser,) queryset = Refund.objects.all() serializer_class = serializers.RefundSerializer def update(self, request, *args, **kwargs): APPROVE = 'approve' DENY = 'deny' APPROVE_PAYMENT_ONLY = 'approve_payment_only' action = request.data.get('action', '').lower() if action not in (APPROVE, DENY, APPROVE_PAYMENT_ONLY): raise ParseError('The action [{}] is not valid.'.format(action)) with transaction.atomic(): refund = self.get_object() result = False if action in (APPROVE, APPROVE_PAYMENT_ONLY): revoke_fulfillment = action == APPROVE result = refund.approve(revoke_fulfillment=revoke_fulfillment) elif action == DENY: result = refund.deny() http_status = status.HTTP_200_OK if result else status.HTTP_500_INTERNAL_SERVER_ERROR serializer = self.get_serializer(refund) return Response(serializer.data, status=http_status)
agpl-3.0
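RefundCreateView above pulls everything it needs from request.data; a sketch of the two payload shapes it accepts (all values are illustrative, and the URL the view is mounted on comes from the surrounding urls module, not this file):

# Course-run refund: refund every completed order line for this user and course.
course_run_payload = {
    "username": "jdoe",
    "course_id": "course-v1:edX+DemoX+Demo_Course",
}

# Course-entitlement refund: refund the entitlement on a specific order.
entitlement_payload = {
    "username": "jdoe",
    "order_number": "EDX-100001",
    "entitlement_uuid": "b9c2a1d0-1234-5678-9abc-def012345678",
}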
jcoady9/python-for-android
python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/rijndael.py
359
11341
""" A pure python (slow) implementation of rijndael with a decent interface To include - from rijndael import rijndael To do a key setup - r = rijndael(key, block_size = 16) key must be a string of length 16, 24, or 32 blocksize must be 16, 24, or 32. Default is 16 To use - ciphertext = r.encrypt(plaintext) plaintext = r.decrypt(ciphertext) If any strings are of the wrong length a ValueError is thrown """ # ported from the Java reference code by Bram Cohen, [email protected], April 2001 # this code is public domain, unless someone makes # an intellectual property claim against the reference # code, in which case it can be made public domain by # deleting all the comments and renaming all the variables import copy import string #----------------------- #TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING IN #2.4..... import os if os.name != "java": import exceptions if hasattr(exceptions, "FutureWarning"): import warnings warnings.filterwarnings("ignore", category=FutureWarning, append=1) #----------------------- shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]], [[0, 0], [1, 5], [2, 4], [3, 3]], [[0, 0], [1, 7], [3, 5], [4, 4]]] # [keysize][block_size] num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}} A = [[1, 1, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0, 1, 1], [1, 1, 1, 1, 0, 0, 0, 1]] # produce log and alog tables, needed for multiplying in the # field GF(2^m) (generator = 3) alog = [1] for i in xrange(255): j = (alog[-1] << 1) ^ alog[-1] if j & 0x100 != 0: j ^= 0x11B alog.append(j) log = [0] * 256 for i in xrange(1, 255): log[alog[i]] = i # multiply two elements of GF(2^m) def mul(a, b): if a == 0 or b == 0: return 0 return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255] # substitution box based on F^{-1}(x) box = [[0] * 8 for i in xrange(256)] box[1][7] = 1 for i in xrange(2, 256): j = alog[255 - log[i]] for t in xrange(8): box[i][t] = (j >> (7 - t)) & 0x01 B = [0, 1, 1, 0, 0, 0, 1, 1] # affine transform: box[i] <- B + A*box[i] cox = [[0] * 8 for i in xrange(256)] for i in xrange(256): for t in xrange(8): cox[i][t] = B[t] for j in xrange(8): cox[i][t] ^= A[t][j] * box[i][j] # S-boxes and inverse S-boxes S = [0] * 256 Si = [0] * 256 for i in xrange(256): S[i] = cox[i][0] << 7 for t in xrange(1, 8): S[i] ^= cox[i][t] << (7-t) Si[S[i] & 0xFF] = i # T-boxes G = [[2, 1, 1, 3], [3, 2, 1, 1], [1, 3, 2, 1], [1, 1, 3, 2]] AA = [[0] * 8 for i in xrange(4)] for i in xrange(4): for j in xrange(4): AA[i][j] = G[i][j] AA[i][i+4] = 1 for i in xrange(4): pivot = AA[i][i] if pivot == 0: t = i + 1 while AA[t][i] == 0 and t < 4: t += 1 assert t != 4, 'G matrix must be invertible' for j in xrange(8): AA[i][j], AA[t][j] = AA[t][j], AA[i][j] pivot = AA[i][i] for j in xrange(8): if AA[i][j] != 0: AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255] for t in xrange(4): if i != t: for j in xrange(i+1, 8): AA[t][j] ^= mul(AA[i][j], AA[t][i]) AA[t][i] = 0 iG = [[0] * 4 for i in xrange(4)] for i in xrange(4): for j in xrange(4): iG[i][j] = AA[i][j + 4] def mul4(a, bs): if a == 0: return 0 r = 0 for b in bs: r <<= 8 if b != 0: r = r | mul(a, b) return r T1 = [] T2 = [] T3 = [] T4 = [] T5 = [] T6 = [] T7 = [] T8 = [] U1 = [] U2 = [] U3 = [] U4 = [] for t in xrange(256): s = S[t] T1.append(mul4(s, G[0])) T2.append(mul4(s, G[1])) T3.append(mul4(s, G[2])) T4.append(mul4(s, G[3])) s = Si[t] T5.append(mul4(s, 
iG[0])) T6.append(mul4(s, iG[1])) T7.append(mul4(s, iG[2])) T8.append(mul4(s, iG[3])) U1.append(mul4(t, iG[0])) U2.append(mul4(t, iG[1])) U3.append(mul4(t, iG[2])) U4.append(mul4(t, iG[3])) # round constants rcon = [1] r = 1 for t in xrange(1, 30): r = mul(2, r) rcon.append(r) del A del AA del pivot del B del G del box del log del alog del i del j del r del s del t del mul del mul4 del cox del iG class rijndael: def __init__(self, key, block_size = 16): if block_size != 16 and block_size != 24 and block_size != 32: raise ValueError('Invalid block size: ' + str(block_size)) if len(key) != 16 and len(key) != 24 and len(key) != 32: raise ValueError('Invalid key size: ' + str(len(key))) self.block_size = block_size ROUNDS = num_rounds[len(key)][block_size] BC = block_size / 4 # encryption round keys Ke = [[0] * BC for i in xrange(ROUNDS + 1)] # decryption round keys Kd = [[0] * BC for i in xrange(ROUNDS + 1)] ROUND_KEY_COUNT = (ROUNDS + 1) * BC KC = len(key) / 4 # copy user material bytes into temporary ints tk = [] for i in xrange(0, KC): tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) | (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3])) # copy values into round key arrays t = 0 j = 0 while j < KC and t < ROUND_KEY_COUNT: Ke[t / BC][t % BC] = tk[j] Kd[ROUNDS - (t / BC)][t % BC] = tk[j] j += 1 t += 1 tt = 0 rconpointer = 0 while t < ROUND_KEY_COUNT: # extrapolate using phi (the round key evolution function) tt = tk[KC - 1] tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \ (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \ (S[ tt & 0xFF] & 0xFF) << 8 ^ \ (S[(tt >> 24) & 0xFF] & 0xFF) ^ \ (rcon[rconpointer] & 0xFF) << 24 rconpointer += 1 if KC != 8: for i in xrange(1, KC): tk[i] ^= tk[i-1] else: for i in xrange(1, KC / 2): tk[i] ^= tk[i-1] tt = tk[KC / 2 - 1] tk[KC / 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \ (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \ (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \ (S[(tt >> 24) & 0xFF] & 0xFF) << 24 for i in xrange(KC / 2 + 1, KC): tk[i] ^= tk[i-1] # copy values into round key arrays j = 0 while j < KC and t < ROUND_KEY_COUNT: Ke[t / BC][t % BC] = tk[j] Kd[ROUNDS - (t / BC)][t % BC] = tk[j] j += 1 t += 1 # inverse MixColumn where needed for r in xrange(1, ROUNDS): for j in xrange(BC): tt = Kd[r][j] Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \ U2[(tt >> 16) & 0xFF] ^ \ U3[(tt >> 8) & 0xFF] ^ \ U4[ tt & 0xFF] self.Ke = Ke self.Kd = Kd def encrypt(self, plaintext): if len(plaintext) != self.block_size: raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) Ke = self.Ke BC = self.block_size / 4 ROUNDS = len(Ke) - 1 if BC == 4: SC = 0 elif BC == 6: SC = 1 else: SC = 2 s1 = shifts[SC][1][0] s2 = shifts[SC][2][0] s3 = shifts[SC][3][0] a = [0] * BC # temporary work array t = [] # plaintext to ints + key for i in xrange(BC): t.append((ord(plaintext[i * 4 ]) << 24 | ord(plaintext[i * 4 + 1]) << 16 | ord(plaintext[i * 4 + 2]) << 8 | ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i]) # apply round transforms for r in xrange(1, ROUNDS): for i in xrange(BC): a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^ T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^ T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^ T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i] t = copy.copy(a) # last round is special result = [] for i in xrange(BC): tt = Ke[ROUNDS][i] result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) return 
string.join(map(chr, result), '') def decrypt(self, ciphertext): if len(ciphertext) != self.block_size: raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) Kd = self.Kd BC = self.block_size / 4 ROUNDS = len(Kd) - 1 if BC == 4: SC = 0 elif BC == 6: SC = 1 else: SC = 2 s1 = shifts[SC][1][1] s2 = shifts[SC][2][1] s3 = shifts[SC][3][1] a = [0] * BC # temporary work array t = [0] * BC # ciphertext to ints + key for i in xrange(BC): t[i] = (ord(ciphertext[i * 4 ]) << 24 | ord(ciphertext[i * 4 + 1]) << 16 | ord(ciphertext[i * 4 + 2]) << 8 | ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i] # apply round transforms for r in xrange(1, ROUNDS): for i in xrange(BC): a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^ T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^ T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^ T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i] t = copy.copy(a) # last round is special result = [] for i in xrange(BC): tt = Kd[ROUNDS][i] result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) return string.join(map(chr, result), '') def encrypt(key, block): return rijndael(key, len(block)).encrypt(block) def decrypt(key, block): return rijndael(key, len(block)).decrypt(block) def test(): def t(kl, bl): b = 'b' * bl r = rijndael('a' * kl, bl) assert r.decrypt(r.encrypt(b)) == b t(16, 16) t(16, 24) t(16, 32) t(24, 16) t(24, 24) t(24, 32) t(32, 16) t(32, 24) t(32, 32)
apache-2.0
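The rijndael record above spells out its public interface in the module docstring: build the cipher from a 16-, 24-, or 32-byte key, then encrypt and decrypt single blocks of exactly block_size bytes. A minimal usage sketch against that documented interface (Python 2 era byte strings; no padding or chaining is provided, and the import path is an assumption):

from rijndael import rijndael   # assumes the file is importable under this name

key = 'k' * 32                      # 16-, 24- or 32-byte key, per the docstring
r = rijndael(key, block_size=16)

block = 'exactly16bytes!!'          # length must equal block_size, or ValueError is raised
ciphertext = r.encrypt(block)
assert r.decrypt(ciphertext) == block

The module-level encrypt(key, block) and decrypt(key, block) helpers do the same for a single block, inferring the block size from len(block).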
TanPhongPhan/ABC4GSD
ABC v3/library/basic_frames/ApplicationList.py
2
6114
#!/usr/bin/env python # -*- coding: utf8 -*- import wx import sys, os sys.path.append(os.getcwd()) import library.constants as CO from library.utils import utils as UT from library.client.ABCAppInterface import ABCAppInterface class ApplicationList(wx.Frame, ABCAppInterface): def __init__(self, parent, id, title, param): wx.Frame.__init__(self, parent, id, title) ABCAppInterface.__init__(self, 'ApplicationList') self._definedProperties = ['pos_x', 'pos_y', 'dim_x', 'dim_y'] self.lastActivity=None self.__order = [ ('ID', '_id'), ('Name', 'name'), ('Status', 'state')] self.__conversion = {} self.__conversion['state'] = {CO.application_DETACHED:'Detached', CO.application_PINNED:'Pinned', CO.application_FULLSYNC:'Fullsync', CO.abc_UNKNOWN:'Unknown', CO.abc_INITIALIZED:'Initialized'} self.__obj_content = {} self.__obj_property = {} self.__obj_content["list"] = [ "ListCtrl", wx.ListCtrl(self, style=wx.LC_REPORT | wx.BORDER_NONE | wx.LC_EDIT_LABELS | wx.LC_SORT_ASCENDING) ] for i, x in enumerate(self.__order): self.__obj_content["list"][1].InsertColumn(i, x[0]) self.CreateStatusBar() menuBar = wx.MenuBar() # filemenu= wx.Menu() # # filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program") # menuBar.Append(filemenu,"&File") # # filemenu= wx.Menu() # filemenu.Append(CO.menu_USER_ADD,"New"," Create new app") # filemenu.Append(CO.menu_USER_MODIFY,"Modify"," Modify current app") # # menuBar.Append(filemenu,"&App") # # filemenu= wx.Menu() # filemenu.Append(CO.menu_APPLICATION_PIN,"Pin"," Share the application with the activity partecipants") # menuBar.Append(filemenu,"&Action") self.SetMenuBar(menuBar) wx.EVT_MENU(self, wx.ID_EXIT, self.onExit) self.Bind(wx.EVT_CLOSE, self.onClose) self.Bind(wx.EVT_MENU, self.onNew, id=CO.menu_USER_ADD) self.Bind(wx.EVT_MENU, self.onModify, id=CO.menu_USER_MODIFY) box = wx.BoxSizer( wx.VERTICAL ) box.Add( self.__obj_content[ "list" ][1], 1, wx.EXPAND ) self.SetSizer( box ) self.SetAutoLayout(1) box.Fit(self) self.resume() self.Show(1) def killOperation(self): self.Close(True) def suspendOperation(self): self.setProperty('pos_x', self.GetPosition()[0]) self.setProperty('pos_y', self.GetPosition()[1]) self.setProperty('dim_x', self.GetSize()[0]) self.setProperty('dim_y', self.GetSize()[1]) def resumeOperation(self): try: self.SetPosition( (int(self.getProperty('pos_x')), int(self.getProperty('pos_y'))) ) self.SetSize( (int(self.getProperty('dim_x')), int(self.getProperty('dim_y'))) ) except: pass self.deleteAllItems(True) self.refreshApplications() self.setApplicationList() for x in self.__obj_property.keys(): self.standardSubscribtion(x) def standardSubscribtion(self, id): self.subscribe('abc.application.%s.state'% (id, ), self.changeState ) # self.subscribe('abc.application.%s.name'% (id, ), self.changeName ) def personalHandler(self, ch, msg): type = msg[0] msg = msg[1] if type == 'INFO': actId = long(msg.split(' ', 1)[0]) field = msg.split(' ', 1)[1].split('=')[0] value = msg.split('=')[1] x = self.getapp(actId) if x == None: x = {} self.__obj_property["apps"][actId] = x self.__obj_property["apps"][actId][field] = value self.SetappList(actId) if type == 'CMD': if 'INIT' in msg: self.DeleteAllItems(True) def refreshApplications(self): resp = self._query('abc.activity.%s.application'%(self._actId, )) if isinstance(resp, long): resp = [resp] elif isinstance(resp, str): resp = eval(resp) if resp == None: return for id in resp: if id in self.__obj_property.keys(): del self.__obj_property[id] self.__obj_property[id] = [id] for y in self.__order[1:]: 
self.__obj_property[id].append( self._query('abc.application.%s.%s'%(id, y[1])) ) def deleteAllItems(self, content = False): self.__obj_content["list"][1].DeleteAllItems() if content: self.__obj_property = {} def setApplicationList(self): self.deleteAllItems() for i, x in enumerate(self.__obj_property.keys()): for l, y in enumerate(self.__order): val = self.__obj_property[x][l] if y[1] in self.__conversion.keys(): val = self.__conversion[y[1]][int(val)] if isinstance(val, int) or isinstance(val, long): val = str(val) if not l: self.__obj_content["list"][1].InsertStringItem(i, val) else: self.__obj_content["list"][1].SetStringItem(i, l, val) def onNew(self, event): pass def onModify(self, event): pass def onExit(self,e): self.Close( True ) def onClose(self, e): #self.Suspend() self.Destroy() def changeState(self, wip): wip = wip.split('.') state = wip[-1] id = long(wip[2]) self.__obj_property[id][2] = state self.setApplicationList() def main( param = None ): global app app = wx.PySimpleApp() frame = ApplicationList(None,-1,"Application List", param) app.SetExitOnFrameDelete(True) app.MainLoop() if __name__ == "__main__": main()
mit
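ApplicationList above follows the classic wxPython pattern of a frame wrapping a report-mode list control. Stripped of the ABC plumbing, the same skeleton looks roughly like this (window title and row values are made up):

import wx

class MiniList(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, -1, "Applications")
        lst = wx.ListCtrl(self, style=wx.LC_REPORT)
        for col, title in enumerate(("ID", "Name", "Status")):
            lst.InsertColumn(col, title)
        lst.InsertStringItem(0, "1")        # first column starts the row
        lst.SetStringItem(0, 1, "Editor")   # remaining columns fill it in
        lst.SetStringItem(0, 2, "Pinned")

app = wx.PySimpleApp()
MiniList().Show()
app.MainLoop()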
aabbox/kbengine
kbe/res/scripts/common/Lib/test/test_code_module.py
79
3009
"Test InteractiveConsole and InteractiveInterpreter from code module" import sys import unittest from contextlib import ExitStack from unittest import mock from test import support code = support.import_module('code') class TestInteractiveConsole(unittest.TestCase): def setUp(self): self.console = code.InteractiveConsole() self.mock_sys() def mock_sys(self): "Mock system environment for InteractiveConsole" # use exit stack to match patch context managers to addCleanup stack = ExitStack() self.addCleanup(stack.close) self.infunc = stack.enter_context(mock.patch('code.input', create=True)) self.stdout = stack.enter_context(mock.patch('code.sys.stdout')) self.stderr = stack.enter_context(mock.patch('code.sys.stderr')) prepatch = mock.patch('code.sys', wraps=code.sys, spec=code.sys) self.sysmod = stack.enter_context(prepatch) if sys.excepthook is sys.__excepthook__: self.sysmod.excepthook = self.sysmod.__excepthook__ def test_ps1(self): self.infunc.side_effect = EOFError('Finished') self.console.interact() self.assertEqual(self.sysmod.ps1, '>>> ') def test_ps2(self): self.infunc.side_effect = EOFError('Finished') self.console.interact() self.assertEqual(self.sysmod.ps2, '... ') def test_console_stderr(self): self.infunc.side_effect = ["'antioch'", "", EOFError('Finished')] self.console.interact() for call in list(self.stdout.method_calls): if 'antioch' in ''.join(call[1]): break else: raise AssertionError("no console stdout") def test_syntax_error(self): self.infunc.side_effect = ["undefined", EOFError('Finished')] self.console.interact() for call in self.stderr.method_calls: if 'NameError' in ''.join(call[1]): break else: raise AssertionError("No syntax error from console") def test_sysexcepthook(self): self.infunc.side_effect = ["raise ValueError('')", EOFError('Finished')] hook = mock.Mock() self.sysmod.excepthook = hook self.console.interact() self.assertTrue(hook.called) def test_banner(self): # with banner self.infunc.side_effect = EOFError('Finished') self.console.interact(banner='Foo') self.assertEqual(len(self.stderr.method_calls), 2) banner_call = self.stderr.method_calls[0] self.assertEqual(banner_call, ['write', ('Foo\n',), {}]) # no banner self.stderr.reset_mock() self.infunc.side_effect = EOFError('Finished') self.console.interact(banner='') self.assertEqual(len(self.stderr.method_calls), 1) def test_main(): support.run_unittest(TestInteractiveConsole) if __name__ == "__main__": unittest.main()
lgpl-3.0
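The tests above drive code.InteractiveConsole through mocked input and sys objects. The same machinery can also be poked at directly with InteractiveInterpreter, which is handy when writing similar tests (standard-library behaviour, shown for orientation):

import code

interp = code.InteractiveInterpreter()
needs_more = interp.runsource("x = 6 * 7")   # False: a complete statement was compiled and run
interp.runsource("print(x)")                 # prints 42 from the interpreter's namespace
assert needs_more is False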
dorotan/pythontraining
env/Lib/base64.py
15
20442
#! /usr/bin/env python3 """Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings""" # Modified 04-Oct-1995 by Jack Jansen to use binascii module # Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support # Modified 22-May-2007 by Guido van Rossum to use bytes everywhere import re import struct import binascii __all__ = [ # Legacy interface exports traditional RFC 2045 Base64 encodings 'encode', 'decode', 'encodebytes', 'decodebytes', # Generalized interface for other encodings 'b64encode', 'b64decode', 'b32encode', 'b32decode', 'b16encode', 'b16decode', # Base85 and Ascii85 encodings 'b85encode', 'b85decode', 'a85encode', 'a85decode', # Standard Base64 encoding 'standard_b64encode', 'standard_b64decode', # Some common Base64 alternatives. As referenced by RFC 3458, see thread # starting at: # # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html 'urlsafe_b64encode', 'urlsafe_b64decode', ] bytes_types = (bytes, bytearray) # Types acceptable as binary data def _bytes_from_decode_data(s): if isinstance(s, str): try: return s.encode('ascii') except UnicodeEncodeError: raise ValueError('string argument should contain only ASCII characters') if isinstance(s, bytes_types): return s try: return memoryview(s).tobytes() except TypeError: raise TypeError("argument should be a bytes-like object or ASCII " "string, not %r" % s.__class__.__name__) from None # Base64 encoding/decoding uses binascii def b64encode(s, altchars=None): """Encode the bytes-like object s using Base64 and return a bytes object. Optional altchars should be a byte string of length 2 which specifies an alternative alphabet for the '+' and '/' characters. This allows an application to e.g. generate url or filesystem safe Base64 strings. """ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] if altchars is not None: assert len(altchars) == 2, repr(altchars) return encoded.translate(bytes.maketrans(b'+/', altchars)) return encoded def b64decode(s, altchars=None, validate=False): """Decode the Base64 encoded bytes-like object or ASCII string s. Optional altchars must be a bytes-like object or ASCII string of length 2 which specifies the alternative alphabet used instead of the '+' and '/' characters. The result is returned as a bytes object. A binascii.Error is raised if s is incorrectly padded. If validate is False (the default), characters that are neither in the normal base-64 alphabet nor the alternative alphabet are discarded prior to the padding check. If validate is True, these non-alphabet characters in the input result in a binascii.Error. """ s = _bytes_from_decode_data(s) if altchars is not None: altchars = _bytes_from_decode_data(altchars) assert len(altchars) == 2, repr(altchars) s = s.translate(bytes.maketrans(altchars, b'+/')) if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s): raise binascii.Error('Non-base64 digit found') return binascii.a2b_base64(s) def standard_b64encode(s): """Encode bytes-like object s using the standard Base64 alphabet. The result is returned as a bytes object. """ return b64encode(s) def standard_b64decode(s): """Decode bytes encoded with the standard Base64 alphabet. Argument s is a bytes-like object or ASCII string to decode. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded. Characters that are not in the standard alphabet are discarded prior to the padding check. 
""" return b64decode(s) _urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') def urlsafe_b64encode(s): """Encode bytes using the URL- and filesystem-safe Base64 alphabet. Argument s is a bytes-like object to encode. The result is returned as a bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): """Decode bytes using the URL- and filesystem-safe Base64 alphabet. Argument s is a bytes-like object or ASCII string to decode. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded. Characters that are not in the URL-safe base-64 alphabet, and are not a plus '+' or slash '/', are discarded prior to the padding check. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ s = _bytes_from_decode_data(s) s = s.translate(_urlsafe_decode_translation) return b64decode(s) # Base32 encoding/decoding must be done in Python _b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567' _b32tab2 = None _b32rev = None def b32encode(s): """Encode the bytes-like object s using Base32 and return a bytes object. """ global _b32tab2 # Delay the initialization of the table to not waste memory # if the function is never called if _b32tab2 is None: b32tab = [bytes((i,)) for i in _b32alphabet] _b32tab2 = [a + b for a in b32tab for b in b32tab] b32tab = None if not isinstance(s, bytes_types): s = memoryview(s).tobytes() leftover = len(s) % 5 # Pad the last quantum with zero bits if necessary if leftover: s = s + bytes(5 - leftover) # Don't use += ! encoded = bytearray() from_bytes = int.from_bytes b32tab2 = _b32tab2 for i in range(0, len(s), 5): c = from_bytes(s[i: i + 5], 'big') encoded += (b32tab2[c >> 30] + # bits 1 - 10 b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20 b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30 b32tab2[c & 0x3ff] # bits 31 - 40 ) # Adjust for any leftover partial quanta if leftover == 1: encoded[-6:] = b'======' elif leftover == 2: encoded[-4:] = b'====' elif leftover == 3: encoded[-3:] = b'===' elif leftover == 4: encoded[-1:] = b'=' return bytes(encoded) def b32decode(s, casefold=False, map01=None): """Decode the Base32 encoded bytes-like object or ASCII string s. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. For security purposes, the default is False. RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to either the letter I (eye) or letter L (el). The optional argument map01 when not None, specifies which letter the digit 1 should be mapped to (when map01 is not None, the digit 0 is always mapped to the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded or if there are non-alphabet characters present in the input. """ global _b32rev # Delay the initialization of the table to not waste memory # if the function is never called if _b32rev is None: _b32rev = {v: k for k, v in enumerate(_b32alphabet)} s = _bytes_from_decode_data(s) if len(s) % 8: raise binascii.Error('Incorrect padding') # Handle section 2.4 zero and one mapping. The flag map01 will be either # False, or the character to map the digit 1 (one) to. It should be # either L (el) or I (eye). 
if map01 is not None: map01 = _bytes_from_decode_data(map01) assert len(map01) == 1, repr(map01) s = s.translate(bytes.maketrans(b'01', b'O' + map01)) if casefold: s = s.upper() # Strip off pad characters from the right. We need to count the pad # characters because this will tell us how many null bytes to remove from # the end of the decoded string. l = len(s) s = s.rstrip(b'=') padchars = l - len(s) # Now decode the full quanta decoded = bytearray() b32rev = _b32rev for i in range(0, len(s), 8): quanta = s[i: i + 8] acc = 0 try: for c in quanta: acc = (acc << 5) + b32rev[c] except KeyError: raise binascii.Error('Non-base32 digit found') from None decoded += acc.to_bytes(5, 'big') # Process the last, partial quanta if padchars: acc <<= 5 * padchars last = acc.to_bytes(5, 'big') if padchars == 1: decoded[-5:] = last[:-1] elif padchars == 3: decoded[-5:] = last[:-2] elif padchars == 4: decoded[-5:] = last[:-3] elif padchars == 6: decoded[-5:] = last[:-4] else: raise binascii.Error('Incorrect padding') return bytes(decoded) # RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns # lowercase. The RFC also recommends against accepting input case # insensitively. def b16encode(s): """Encode the bytes-like object s using Base16 and return a bytes object. """ return binascii.hexlify(s).upper() def b16decode(s, casefold=False): """Decode the Base16 encoded bytes-like object or ASCII string s. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. For security purposes, the default is False. The result is returned as a bytes object. A binascii.Error is raised if s is incorrectly padded or if there are non-alphabet characters present in the input. """ s = _bytes_from_decode_data(s) if casefold: s = s.upper() if re.search(b'[^0-9A-F]', s): raise binascii.Error('Non-base16 digit found') return binascii.unhexlify(s) # # Ascii85 encoding/decoding # _a85chars = None _a85chars2 = None _A85START = b"<~" _A85END = b"~>" def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False): # Helper function for a85encode and b85encode if not isinstance(b, bytes_types): b = memoryview(b).tobytes() padding = (-len(b)) % 4 if padding: b = b + b'\0' * padding words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b) chunks = [b'z' if foldnuls and not word else b'y' if foldspaces and word == 0x20202020 else (chars2[word // 614125] + chars2[word // 85 % 7225] + chars[word % 85]) for word in words] if padding and not pad: if chunks[-1] == b'z': chunks[-1] = chars[0] * 5 chunks[-1] = chunks[-1][:-padding] return b''.join(chunks) def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): """Encode bytes-like object b using Ascii85 and return a bytes object. foldspaces is an optional flag that uses the special short sequence 'y' instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This feature is not supported by the "standard" Adobe encoding. wrapcol controls whether the output should have newline (b'\\n') characters added to it. If this is non-zero, each output line will be at most this many characters long. pad controls whether the input is padded to a multiple of 4 before encoding. Note that the btoa implementation always pads. adobe controls whether the encoded byte sequence is framed with <~ and ~>, which is used by the Adobe implementation. 
""" global _a85chars, _a85chars2 # Delay the initialization of tables to not waste memory # if the function is never called if _a85chars is None: _a85chars = [bytes((i,)) for i in range(33, 118)] _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars] result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces) if adobe: result = _A85START + result if wrapcol: wrapcol = max(2 if adobe else 1, wrapcol) chunks = [result[i: i + wrapcol] for i in range(0, len(result), wrapcol)] if adobe: if len(chunks[-1]) + 2 > wrapcol: chunks.append(b'') result = b'\n'.join(chunks) if adobe: result += _A85END return result def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): """Decode the Ascii85 encoded bytes-like object or ASCII string b. foldspaces is a flag that specifies whether the 'y' short sequence should be accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is not supported by the "standard" Adobe encoding. adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. is framed with <~ and ~>). ignorechars should be a byte string containing characters to ignore from the input. This should only contain whitespace characters, and by default contains all whitespace characters in ASCII. The result is returned as a bytes object. """ b = _bytes_from_decode_data(b) if adobe: if not b.endswith(_A85END): raise ValueError( "Ascii85 encoded byte sequences must end " "with {!r}".format(_A85END) ) if b.startswith(_A85START): b = b[2:-2] # Strip off start/end markers else: b = b[:-2] # # We have to go through this stepwise, so as to ignore spaces and handle # special short sequences # packI = struct.Struct('!I').pack decoded = [] decoded_append = decoded.append curr = [] curr_append = curr.append curr_clear = curr.clear for x in b + b'u' * 4: if b'!'[0] <= x <= b'u'[0]: curr_append(x) if len(curr) == 5: acc = 0 for x in curr: acc = 85 * acc + (x - 33) try: decoded_append(packI(acc)) except struct.error: raise ValueError('Ascii85 overflow') from None curr_clear() elif x == b'z'[0]: if curr: raise ValueError('z inside Ascii85 5-tuple') decoded_append(b'\0\0\0\0') elif foldspaces and x == b'y'[0]: if curr: raise ValueError('y inside Ascii85 5-tuple') decoded_append(b'\x20\x20\x20\x20') elif x in ignorechars: # Skip whitespace continue else: raise ValueError('Non-Ascii85 digit found: %c' % x) result = b''.join(decoded) padding = 4 - len(curr) if padding: # Throw away the extra padding result = result[:-padding] return result # The following code is originally taken (with permission) from Mercurial _b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~") _b85chars = None _b85chars2 = None _b85dec = None def b85encode(b, pad=False): """Encode bytes-like object b in base85 format and return a bytes object. If pad is true, the input is padded with b'\\0' so its length is a multiple of 4 bytes before encoding. """ global _b85chars, _b85chars2 # Delay the initialization of tables to not waste memory # if the function is never called if _b85chars is None: _b85chars = [bytes((i,)) for i in _b85alphabet] _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars] return _85encode(b, _b85chars, _b85chars2, pad) def b85decode(b): """Decode the base85-encoded bytes-like object or ASCII string b The result is returned as a bytes object. 
""" global _b85dec # Delay the initialization of tables to not waste memory # if the function is never called if _b85dec is None: _b85dec = [None] * 256 for i, c in enumerate(_b85alphabet): _b85dec[c] = i b = _bytes_from_decode_data(b) padding = (-len(b)) % 5 b = b + b'~' * padding out = [] packI = struct.Struct('!I').pack for i in range(0, len(b), 5): chunk = b[i:i + 5] acc = 0 try: for c in chunk: acc = acc * 85 + _b85dec[c] except TypeError: for j, c in enumerate(chunk): if _b85dec[c] is None: raise ValueError('bad base85 character at position %d' % (i + j)) from None raise try: out.append(packI(acc)) except struct.error: raise ValueError('base85 overflow in hunk starting at byte %d' % i) from None result = b''.join(out) if padding: result = result[:-padding] return result # Legacy interface. This code could be cleaned up since I don't believe # binascii has any line length limitations. It just doesn't seem worth it # though. The files should be opened in binary mode. MAXLINESIZE = 76 # Excluding the CRLF MAXBINSIZE = (MAXLINESIZE//4)*3 def encode(input, output): """Encode a file; input and output are binary files.""" while True: s = input.read(MAXBINSIZE) if not s: break while len(s) < MAXBINSIZE: ns = input.read(MAXBINSIZE-len(s)) if not ns: break s += ns line = binascii.b2a_base64(s) output.write(line) def decode(input, output): """Decode a file; input and output are binary files.""" while True: line = input.readline() if not line: break s = binascii.a2b_base64(line) output.write(s) def _input_type_check(s): try: m = memoryview(s) except TypeError as err: msg = "expected bytes-like object, not %s" % s.__class__.__name__ raise TypeError(msg) from err if m.format not in ('c', 'b', 'B'): msg = ("expected single byte elements, not %r from %s" % (m.format, s.__class__.__name__)) raise TypeError(msg) if m.ndim != 1: msg = ("expected 1-D data, not %d-D data from %s" % (m.ndim, s.__class__.__name__)) raise TypeError(msg) def encodebytes(s): """Encode a bytestring into a bytes object containing multiple lines of base-64 data.""" _input_type_check(s) pieces = [] for i in range(0, len(s), MAXBINSIZE): chunk = s[i : i + MAXBINSIZE] pieces.append(binascii.b2a_base64(chunk)) return b"".join(pieces) def encodestring(s): """Legacy alias of encodebytes().""" import warnings warnings.warn("encodestring() is a deprecated alias, use encodebytes()", DeprecationWarning, 2) return encodebytes(s) def decodebytes(s): """Decode a bytestring of base-64 data into a bytes object.""" _input_type_check(s) return binascii.a2b_base64(s) def decodestring(s): """Legacy alias of decodebytes().""" import warnings warnings.warn("decodestring() is a deprecated alias, use decodebytes()", DeprecationWarning, 2) return decodebytes(s) # Usable as a script... 
def main(): """Small main program""" import sys, getopt try: opts, args = getopt.getopt(sys.argv[1:], 'deut') except getopt.error as msg: sys.stdout = sys.stderr print(msg) print("""usage: %s [-d|-e|-u|-t] [file|-] -d, -u: decode -e: encode (default) -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]) sys.exit(2) func = encode for o, a in opts: if o == '-e': func = encode if o == '-d': func = decode if o == '-u': func = decode if o == '-t': test(); return if args and args[0] != '-': with open(args[0], 'rb') as f: func(f, sys.stdout.buffer) else: func(sys.stdin.buffer, sys.stdout.buffer) def test(): s0 = b"Aladdin:open sesame" print(repr(s0)) s1 = encodebytes(s0) print(repr(s1)) s2 = decodebytes(s1) print(repr(s2)) assert s0 == s2 if __name__ == '__main__': main()
apache-2.0
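The record above is the CPython 3 base64 module itself; all of its documented codecs round-trip, which is the behaviour the docstrings describe (standard-library usage, not specific to this copy):

import base64

data = b"Aladdin:open sesame"
assert base64.b64decode(base64.b64encode(data)) == data
assert base64.urlsafe_b64decode(base64.urlsafe_b64encode(data)) == data   # '-'/'_' alphabet
assert base64.b32decode(base64.b32encode(data)) == data
assert base64.b16decode(base64.b16encode(data)) == data
assert base64.a85decode(base64.a85encode(data)) == data                   # Ascii85
assert base64.b85decode(base64.b85encode(data)) == data                   # git/Mercurial-style Base85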
larks/mbed
workspace_tools/host_tests/udpecho_server_auto.py
101
2515
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re import sys import uuid from sys import stdout from socket import socket, AF_INET, SOCK_DGRAM class UDPEchoServerTest(): ECHO_SERVER_ADDRESS = "" ECHO_PORT = 0 s = None # Socket PATTERN_SERVER_IP = "Server IP Address is (\d+).(\d+).(\d+).(\d+):(\d+)" re_detect_server_ip = re.compile(PATTERN_SERVER_IP) def test(self, selftest): result = True serial_ip_msg = selftest.mbed.serial_readline() if serial_ip_msg is None: return selftest.RESULT_IO_SERIAL selftest.notify(serial_ip_msg) # Searching for IP address and port prompted by server m = self.re_detect_server_ip.search(serial_ip_msg) if m and len(m.groups()): self.ECHO_SERVER_ADDRESS = ".".join(m.groups()[:4]) self.ECHO_PORT = int(m.groups()[4]) # must be integer for socket.connect method selftest.notify("HOST: UDP Server found at: " + self.ECHO_SERVER_ADDRESS + ":" + str(self.ECHO_PORT)) # We assume this test fails so can't send 'error' message to server try: self.s = socket(AF_INET, SOCK_DGRAM) except Exception, e: self.s = None selftest.notify("HOST: Socket error: %s"% e) return selftest.RESULT_ERROR for i in range(0, 100): TEST_STRING = str(uuid.uuid4()) self.s.sendto(TEST_STRING, (self.ECHO_SERVER_ADDRESS, self.ECHO_PORT)) data = self.s.recv(len(TEST_STRING)) received_str = repr(data)[1:-1] if TEST_STRING != received_str: result = False break sys.stdout.write('.') stdout.flush() else: result = False if self.s is not None: self.s.close() return selftest.RESULT_SUCCESS if result else selftest.RESULT_FAILURE
apache-2.0
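The host test above is, at its core, a UDP echo client. Removed from the mbed harness it reduces to the following sketch; the address and port are placeholders, since the real values are parsed from the board's serial output:

import uuid
from socket import socket, AF_INET, SOCK_DGRAM

server = ("192.168.0.10", 7195)      # placeholder; the real test reads this from the serial banner
s = socket(AF_INET, SOCK_DGRAM)
s.settimeout(5.0)

payload = str(uuid.uuid4()).encode()
s.sendto(payload, server)
echoed, _ = s.recvfrom(len(payload))
assert echoed == payload
s.close()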
cgar/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/abort_handshake_wsh.py
465
1781
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from mod_pywebsocket import handshake def web_socket_do_extra_handshake(request): raise handshake.AbortedByUserException( "Aborted in web_socket_do_extra_handshake") def web_socket_transfer_data(request): pass # vi:sts=4 sw=4 et
mpl-2.0
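The handler above deliberately aborts every handshake. For contrast, a minimal accepting echo handler in the same mod_pywebsocket convention looks roughly like this (a sketch modelled on the stock echo example shipped with pywebsocket):

from mod_pywebsocket import msgutil


def web_socket_do_extra_handshake(request):
    pass  # accept the handshake unchanged


def web_socket_transfer_data(request):
    while True:
        line = request.ws_stream.receive_message()
        if line is None:
            return
        msgutil.send_message(request, line)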
vitan/hue
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/validate_templates.py
35
3355
import os from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.core.management.color import color_style from django.template.base import add_to_builtins from django.template.loaders.filesystem import Loader from django_extensions.utils import validatingtemplatetags from django_extensions.management.utils import signalcommand # # TODO: Render the template with fake request object ? # class Command(BaseCommand): args = '' help = "Validate templates on syntax and compile errors" option_list = BaseCommand.option_list + ( make_option('--break', '-b', action='store_true', dest='break', default=False, help="Break on first error."), make_option('--check-urls', '-u', action='store_true', dest='check_urls', default=False, help="Check url tag view names are quoted appropriately"), make_option('--force-new-urls', '-n', action='store_true', dest='force_new_urls', default=False, help="Error on usage of old style url tags (without {% load urls from future %}"), make_option('--include', '-i', action='append', dest='includes', default=[], help="Append these paths to TEMPLATE_DIRS") ) @signalcommand def handle(self, *args, **options): from django.conf import settings style = color_style() template_dirs = set(settings.TEMPLATE_DIRS) template_dirs |= set(options.get('includes', [])) template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', [])) settings.TEMPLATE_DIRS = list(template_dirs) settings.TEMPLATE_DEBUG = True verbosity = int(options.get('verbosity', 1)) errors = 0 template_loader = Loader() # Replace built in template tags with our own validating versions if options.get('check_urls', False): add_to_builtins('django_extensions.utils.validatingtemplatetags') for template_dir in template_dirs: for root, dirs, filenames in os.walk(template_dir): for filename in filenames: if filename.endswith(".swp"): continue if filename.endswith("~"): continue filepath = os.path.join(root, filename) if verbosity > 1: print(filepath) validatingtemplatetags.before_new_template(options.get('force_new_urls', False)) try: template_loader.load_template(filename, [root]) except Exception as e: errors += 1 print("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e))))) template_errors = validatingtemplatetags.get_template_errors() for origin, line, message in template_errors: errors += 1 print("%s(%s): %s" % (origin, line, style.ERROR(message))) if errors and options.get('break', False): raise CommandError("Errors found") if errors: raise CommandError("%s errors found" % errors) print("%s errors found" % errors)
apache-2.0
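The command above is driven entirely by its options (--break, --check-urls, --force-new-urls, --include). A hedged sketch of calling it programmatically from a configured Django project; the option names mirror the dest values declared in option_list, and the include path is made up:

from django.core.management import call_command

call_command("validate_templates", check_urls=True, includes=["extra/templates"], verbosity=2)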
c0defreak/python-for-android
python3-alpha/python3-src/Tools/unicode/mkstringprep.py
47
10046
import re, unicodedata, sys if sys.maxunicode == 65535: raise RuntimeError("need UCS-4 Python") def gen_category(cats): for i in range(0, 0x110000): if unicodedata.category(chr(i)) in cats: yield(i) def gen_bidirectional(cats): for i in range(0, 0x110000): if unicodedata.bidirectional(chr(i)) in cats: yield(i) def compact_set(l): single = [] tuple = [] prev = None span = 0 for e in l: if prev is None: prev = e span = 0 continue if prev+span+1 != e: if span > 2: tuple.append((prev,prev+span+1)) else: for i in range(prev, prev+span+1): single.append(i) prev = e span = 0 else: span += 1 if span: tuple.append((prev,prev+span+1)) else: single.append(prev) tuple = " + ".join(["list(range(%d,%d))" % t for t in tuple]) if not single: return "set(%s)" % tuple if not tuple: return "set(%s)" % repr(single) return "set(%s + %s)" % (repr(single),tuple) ############## Read the tables in the RFC ####################### data = open("rfc3454.txt").readlines() tables = [] curname = None for l in data: l = l.strip() if not l: continue # Skip RFC page breaks if l.startswith("Hoffman & Blanchet") or\ l.startswith("RFC 3454"): continue # Find start/end lines m = re.match("----- (Start|End) Table ([A-Z](.[0-9])+) -----", l) if m: if m.group(1) == "Start": if curname: raise RuntimeError("Double Start", (curname, l)) curname = m.group(2) table = {} tables.append((curname, table)) continue else: if not curname: raise RuntimeError("End without start", l) curname = None continue if not curname: continue # Now we are in a table fields = l.split(";") if len(fields) > 1: # Drop comment field fields = fields[:-1] if len(fields) == 1: fields = fields[0].split("-") if len(fields) > 1: # range try: start, end = fields except ValueError: raise RuntimeError("Unpacking problem", l) else: start = end = fields[0] start = int(start, 16) end = int(end, 16) for i in range(start, end+1): table[i] = i else: code, value = fields value = value.strip() if value: value = [int(v, 16) for v in value.split(" ")] else: # table B.1 value = None table[int(code, 16)] = value ########### Generate compact Python versions of the tables ############# print("""# This file is generated by mkstringprep.py. DO NOT EDIT. \"\"\"Library that exposes various tables found in the StringPrep RFC 3454. There are two kinds of tables: sets, for which a member test is provided, and mappings, for which a mapping function is provided. \"\"\" import unicodedata """) print("assert unicodedata.unidata_version == %s" % repr(unicodedata.unidata_version)) # A.1 is the table of unassigned characters # XXX Plane 15 PUA is listed as unassigned in Python. name, table = tables[0] del tables[0] assert name == "A.1" table = set(table.keys()) Cn = set(gen_category(["Cn"])) # FDD0..FDEF are process internal codes Cn -= set(range(0xFDD0, 0xFDF0)) # not a character Cn -= set(range(0xFFFE, 0x110000, 0x10000)) Cn -= set(range(0xFFFF, 0x110000, 0x10000)) # assert table == Cn print(""" def in_table_a1(code): if unicodedata.category(code) != 'Cn': return False c = ord(code) if 0xFDD0 <= c < 0xFDF0: return False return (c & 0xFFFF) not in (0xFFFE, 0xFFFF) """) # B.1 cannot easily be derived name, table = tables[0] del tables[0] assert name == "B.1" table = sorted(table.keys()) print(""" b1_set = """ + compact_set(table) + """ def in_table_b1(code): return ord(code) in b1_set """) # B.2 and B.3 is case folding. # It takes CaseFolding.txt into account, which is # not available in the Python database. Since # B.2 is derived from B.3, we process B.3 first. 
# B.3 supposedly *is* CaseFolding-3.2.0.txt. name, table_b2 = tables[0] del tables[0] assert name == "B.2" name, table_b3 = tables[0] del tables[0] assert name == "B.3" # B.3 is mostly Python's .lower, except for a number # of special cases, e.g. considering canonical forms. b3_exceptions = {} for k,v in table_b2.items(): if map(ord, unichr(k).lower()) != v: b3_exceptions[k] = u"".join(map(unichr,v)) b3 = sorted(b3_exceptions.items()) print(""" b3_exceptions = {""") for i,(k,v) in enumerate(b3): print("0x%x:%s," % (k, repr(v)), end=' ') if i % 4 == 3: print() print("}") print(""" def map_table_b3(code): r = b3_exceptions.get(ord(code)) if r is not None: return r return code.lower() """) def map_table_b3(code): r = b3_exceptions.get(ord(code)) if r is not None: return r return code.lower() # B.2 is case folding for NFKC. This is the same as B.3, # except where NormalizeWithKC(Fold(a)) != # NormalizeWithKC(Fold(NormalizeWithKC(Fold(a)))) def map_table_b2(a): al = map_table_b3(a) b = unicodedata.normalize("NFKC", al) bl = "".join([map_table_b3(ch) for ch in b]) c = unicodedata.normalize("NFKC", bl) if b != c: return c else: return al specials = {} for k,v in table_b2.items(): if list(map(ord, map_table_b2(chr(k)))) != v: specials[k] = v # B.3 should not add any additional special cases assert specials == {} print(""" def map_table_b2(a): al = map_table_b3(a) b = unicodedata.normalize("NFKC", al) bl = u"".join([map_table_b3(ch) for ch in b]) c = unicodedata.normalize("NFKC", bl) if b != c: return c else: return al """) # C.1.1 is a table with a single character name, table = tables[0] del tables[0] assert name == "C.1.1" assert table == {0x20:0x20} print(""" def in_table_c11(code): return code == u" " """) # C.1.2 is the rest of all space characters name, table = tables[0] del tables[0] assert name == "C.1.2" # table = set(table.keys()) # Zs = set(gen_category(["Zs"])) - set([0x20]) # assert Zs == table print(""" def in_table_c12(code): return unicodedata.category(code) == "Zs" and code != u" " def in_table_c11_c12(code): return unicodedata.category(code) == "Zs" """) # C.2.1 ASCII control characters name, table_c21 = tables[0] del tables[0] assert name == "C.2.1" Cc = set(gen_category(["Cc"])) Cc_ascii = Cc & set(range(128)) table_c21 = set(table_c21.keys()) assert Cc_ascii == table_c21 print(""" def in_table_c21(code): return ord(code) < 128 and unicodedata.category(code) == "Cc" """) # C.2.2 Non-ASCII control characters. It also includes # a number of characters in category Cf. 
name, table_c22 = tables[0] del tables[0] assert name == "C.2.2" Cc_nonascii = Cc - Cc_ascii table_c22 = set(table_c22.keys()) assert len(Cc_nonascii - table_c22) == 0 specials = list(table_c22 - Cc_nonascii) specials.sort() print("""c22_specials = """ + compact_set(specials) + """ def in_table_c22(code): c = ord(code) if c < 128: return False if unicodedata.category(code) == "Cc": return True return c in c22_specials def in_table_c21_c22(code): return unicodedata.category(code) == "Cc" or \\ ord(code) in c22_specials """) # C.3 Private use name, table = tables[0] del tables[0] assert name == "C.3" Co = set(gen_category(["Co"])) assert set(table.keys()) == Co print(""" def in_table_c3(code): return unicodedata.category(code) == "Co" """) # C.4 Non-character code points, xFFFE, xFFFF # plus process internal codes name, table = tables[0] del tables[0] assert name == "C.4" nonchar = set(range(0xFDD0,0xFDF0)) nonchar.update(range(0xFFFE,0x110000,0x10000)) nonchar.update(range(0xFFFF,0x110000,0x10000)) table = set(table.keys()) assert table == nonchar print(""" def in_table_c4(code): c = ord(code) if c < 0xFDD0: return False if c < 0xFDF0: return True return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF) """) # C.5 Surrogate codes name, table = tables[0] del tables[0] assert name == "C.5" Cs = set(gen_category(["Cs"])) assert set(table.keys()) == Cs print(""" def in_table_c5(code): return unicodedata.category(code) == "Cs" """) # C.6 Inappropriate for plain text name, table = tables[0] del tables[0] assert name == "C.6" table = sorted(table.keys()) print(""" c6_set = """ + compact_set(table) + """ def in_table_c6(code): return ord(code) in c6_set """) # C.7 Inappropriate for canonical representation name, table = tables[0] del tables[0] assert name == "C.7" table = sorted(table.keys()) print(""" c7_set = """ + compact_set(table) + """ def in_table_c7(code): return ord(code) in c7_set """) # C.8 Change display properties or are deprecated name, table = tables[0] del tables[0] assert name == "C.8" table = sorted(table.keys()) print(""" c8_set = """ + compact_set(table) + """ def in_table_c8(code): return ord(code) in c8_set """) # C.9 Tagging characters name, table = tables[0] del tables[0] assert name == "C.9" table = sorted(table.keys()) print(""" c9_set = """ + compact_set(table) + """ def in_table_c9(code): return ord(code) in c9_set """) # D.1 Characters with bidirectional property "R" or "AL" name, table = tables[0] del tables[0] assert name == "D.1" RandAL = set(gen_bidirectional(["R","AL"])) assert set(table.keys()) == RandAL print(""" def in_table_d1(code): return unicodedata.bidirectional(code) in ("R","AL") """) # D.2 Characters with bidirectional property "L" name, table = tables[0] del tables[0] assert name == "D.2" L = set(gen_bidirectional(["L"])) assert set(table.keys()) == L print(""" def in_table_d2(code): return unicodedata.bidirectional(code) == "L" """)
apache-2.0
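mkstringprep.py above regenerates the standard library's stringprep module from the RFC 3454 tables. The generated predicates are consumed like this (behaviour of the stdlib module, shown for orientation):

import stringprep

assert stringprep.in_table_a1("\U000e0002")        # unassigned code point (table A.1)
assert stringprep.in_table_c11(" ")                # ASCII space (table C.1.1)
assert stringprep.in_table_c12("\u00a0")           # other whitespace, e.g. no-break space (C.1.2)
assert stringprep.in_table_d1("\u05d0")            # bidirectional class R, Hebrew alef (D.1)
assert stringprep.map_table_b3("A") == "a"         # case folding (B.3)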
Daniex/horizon
openstack_dashboard/dashboards/project/data_processing/jobs/tabs.py
38
1172
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from django.utils.translation import ugettext_lazy as _ from horizon import tabs from openstack_dashboard.api import sahara as saharaclient LOG = logging.getLogger(__name__) class GeneralTab(tabs.Tab): name = _("General Info") slug = "job_details_tab" template_name = ("project/data_processing.jobs/_details.html") def get_context_data(self, request): job_id = self.tab_group.kwargs['job_id'] job = saharaclient.job_get(request, job_id) return {"job": job} class JobDetailsTabs(tabs.TabGroup): slug = "job_details" tabs = (GeneralTab,) sticky = True
apache-2.0
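The tab group above only becomes visible once it is attached to a view; the usual Horizon wiring is a TabView subclass like the sketch below (the view class name and template path are assumptions, only tab_group_class comes from the record):

from horizon import tabs

from openstack_dashboard.dashboards.project.data_processing.jobs.tabs import JobDetailsTabs


class JobDetailView(tabs.TabView):
    tab_group_class = JobDetailsTabs
    template_name = 'project/data_processing.jobs/details.html'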
wd5/jangr
djangoappengine/tests/not_return_sets.py
36
4303
import datetime from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned from django.test import TestCase from .testmodels import FieldsWithOptionsModel, OrderedModel, \ SelfReferenceModel class NonReturnSetsTest(TestCase): floats = [5.3, 2.6, 9.1, 1.58, 2.4] emails = ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'] def setUp(self): for index, (float, email) in enumerate(zip(NonReturnSetsTest.floats, NonReturnSetsTest.emails)): self.last_save_time = datetime.datetime.now().time() ordered_instance = OrderedModel(priority=index, pk=index + 1) ordered_instance.save() model = FieldsWithOptionsModel(floating_point=float, integer=int(float), email=email, time=self.last_save_time, foreign_key=ordered_instance) model.save() def test_get(self): self.assertEquals( FieldsWithOptionsModel.objects.get( email='[email protected]').email, '[email protected]') # Test exception when matching multiple entities. self.assertRaises(MultipleObjectsReturned, FieldsWithOptionsModel.objects.get, integer=2) # Test exception when entity does not exist. self.assertRaises(ObjectDoesNotExist, FieldsWithOptionsModel.objects.get, floating_point=5.2) # TODO: Test create when djangos model.save_base is refactored. # TODO: Test get_or_create when refactored. def test_count(self): self.assertEquals( FieldsWithOptionsModel.objects.filter(integer=2).count(), 2) def test_in_bulk(self): self.assertEquals( [key in ['[email protected]', '[email protected]'] for key in FieldsWithOptionsModel.objects.in_bulk( ['[email protected]', '[email protected]']).keys()], [True, ] * 2) def test_latest(self): self.assertEquals( FieldsWithOptionsModel.objects.latest('time').email, '[email protected]') def test_exists(self): self.assertEquals(FieldsWithOptionsModel.objects.exists(), True) def test_deletion(self): # TODO: ForeignKeys will not be deleted! This has to be done # via background tasks. self.assertEquals(FieldsWithOptionsModel.objects.count(), 5) FieldsWithOptionsModel.objects.get(email='[email protected]').delete() self.assertEquals(FieldsWithOptionsModel.objects.count(), 4) FieldsWithOptionsModel.objects.filter(email__in=[ '[email protected]', '[email protected]', '[email protected]', ]).delete() self.assertEquals(FieldsWithOptionsModel.objects.count(), 2) def test_selfref_deletion(self): entity = SelfReferenceModel() entity.save() entity.delete() def test_foreign_key_fetch(self): # Test fetching the ForeignKey. ordered_instance = OrderedModel.objects.get(priority=2) self.assertEquals( FieldsWithOptionsModel.objects.get(integer=9).foreign_key, ordered_instance) def test_foreign_key_backward(self): entity = OrderedModel.objects.all()[0] self.assertEquals(entity.keys.count(), 1) # TODO: Add should save the added instance transactional via for # example force_insert. new_foreign_key = FieldsWithOptionsModel( floating_point=5.6, integer=3, email='[email protected]', time=datetime.datetime.now()) entity.keys.add(new_foreign_key) self.assertEquals(entity.keys.count(), 2) # TODO: Add test for create. entity.keys.remove(new_foreign_key) self.assertEquals(entity.keys.count(), 1) entity.keys.clear() self.assertTrue(not entity.keys.exists()) entity.keys = [new_foreign_key, new_foreign_key] self.assertEquals(entity.keys.count(), 1) self.assertEquals(entity.keys.all()[0].integer, 3)
bsd-3-clause
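The test above exercises the ORM entry points that return plain values rather than querysets. Collected in one place, the calls it covers look like this (model and sample values come from the test's own setUp):

from .testmodels import FieldsWithOptionsModel

one = FieldsWithOptionsModel.objects.get(email='[email protected]')
how_many = FieldsWithOptionsModel.objects.filter(integer=2).count()
newest = FieldsWithOptionsModel.objects.latest('time')
by_key = FieldsWithOptionsModel.objects.in_bulk(['[email protected]', '[email protected]'])
anything = FieldsWithOptionsModel.objects.exists()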
GREO/GNU-Radio
gnuradio-examples/python/digital_voice/encdec.py
10
2029
#!/usr/bin/env python # # Copyright 2005 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, blks2 from gnuradio import audio from gnuradio.eng_option import eng_option from optparse import OptionParser class my_top_block(gr.top_block): def __init__(self): gr.top_block.__init__(self) parser = OptionParser(option_class=eng_option) parser.add_option("-I", "--audio-input", type="string", default="", help="pcm input device name. E.g., hw:0,0 or /dev/dsp") parser.add_option("-O", "--audio-output", type="string", default="", help="pcm output device name. E.g., hw:0,0 or /dev/dsp") (options, args) = parser.parse_args () if len(args) != 0: parser.print_help() raise SystemExit, 1 sample_rate = 8000 src = audio.source(sample_rate, options.audio_input) tx = blks2.digital_voice_tx(self) if_gain = gr.multiply_const_cc(10000) # channel simulator here... rx = blks2.digital_voice_rx(self) dst = audio.sink(sample_rate, options.audio_output) self.connect(src, tx, if_gain, rx, dst) if __name__ == '__main__': try: my_top_block().run() except KeyboardInterrupt: pass
gpl-3.0
yakky/django
tests/check_framework/test_caches.py
249
1114
from django.core.checks.caches import E001 from django.test import SimpleTestCase from django.test.utils import override_settings class CheckCacheSettingsAppDirsTest(SimpleTestCase): VALID_CACHES_CONFIGURATION = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, } INVALID_CACHES_CONFIGURATION = { 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, } @property def func(self): from django.core.checks.caches import check_default_cache_is_configured return check_default_cache_is_configured @override_settings(CACHES=VALID_CACHES_CONFIGURATION) def test_default_cache_included(self): """ Don't error if 'default' is present in CACHES setting. """ self.assertEqual(self.func(None), []) @override_settings(CACHES=INVALID_CACHES_CONFIGURATION) def test_default_cache_not_included(self): """ Error if 'default' not present in CACHES setting. """ self.assertEqual(self.func(None), [E001])
bsd-3-clause
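The check exercised above can also be invoked directly once Django settings are configured; it returns an empty list when CACHES defines a 'default' entry and [E001] otherwise:

from django.core.checks.caches import check_default_cache_is_configured

errors = check_default_cache_is_configured(None)   # [] for a valid CACHES setting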
westinedu/newertrends
django/conf/locale/pt_BR/formats.py
231
1530
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = r'j \de N \de Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = r'j \de N \de Y à\s H:i' YEAR_MONTH_FORMAT = r'F \de Y' MONTH_DAY_FORMAT = r'j \de F' SHORT_DATE_FORMAT = 'd/m/Y' SHORT_DATETIME_FORMAT = 'd/m/Y H:i' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06' # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006' # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
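Locale format modules like the one above are consumed through django.utils.formats when localization is active. A sketch, assuming USE_L10N = True and the pt-br locale:

import datetime

from django.utils import formats, translation

translation.activate('pt-br')
formats.date_format(datetime.date(2006, 10, 25), 'SHORT_DATE_FORMAT')   # '25/10/2006'
translation.deactivate()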
TixLo/kademlia
lib/jsoncpp/devtools/antglob.py
3
7695
#!/usr/bin/env python # encoding: utf-8 # Baptiste Lepilleur, 2009 from __future__ import print_function from dircache import listdir import re import fnmatch import os.path # These fnmatch expressions are used by default to prune the directory tree # while doing the recursive traversal in the glob_impl method of glob function. prune_dirs = '.git .bzr .hg .svn _MTN _darcs CVS SCCS ' # These fnmatch expressions are used by default to exclude files and dirs # while doing the recursive traversal in the glob_impl method of glob function. ##exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split() # These ant_glob expressions are used by default to exclude files and dirs and also prune the directory tree # while doing the recursive traversal in the glob_impl method of glob function. default_excludes = ''' **/*~ **/#*# **/.#* **/%*% **/._* **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/.git **/.git/** **/.gitignore **/.bzr **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/_darcs **/_darcs/** **/.DS_Store ''' DIR = 1 FILE = 2 DIR_LINK = 4 FILE_LINK = 8 LINKS = DIR_LINK | FILE_LINK ALL_NO_LINK = DIR | FILE ALL = DIR | FILE | LINKS _ANT_RE = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)') def ant_pattern_to_re(ant_pattern): """Generates a regular expression from the ant pattern. Matching convention: **/a: match 'a', 'dir/a', 'dir1/dir2/a' a/**/b: match 'a/b', 'a/c/b', 'a/d/c/b' *.py: match 'script.py' but not 'a/script.py' """ rex = ['^'] next_pos = 0 sep_rex = r'(?:/|%s)' % re.escape(os.path.sep) ## print 'Converting', ant_pattern for match in _ANT_RE.finditer(ant_pattern): ## print 'Matched', match.group() ## print match.start(0), next_pos if match.start(0) != next_pos: raise ValueError("Invalid ant pattern") if match.group(1): # /**/ rex.append(sep_rex + '(?:.*%s)?' % sep_rex) elif match.group(2): # **/ rex.append('(?:.*%s)?' % sep_rex) elif match.group(3): # /** rex.append(sep_rex + '.*') elif match.group(4): # * rex.append('[^/%s]*' % re.escape(os.path.sep)) elif match.group(5): # / rex.append(sep_rex) else: # somepath rex.append(re.escape(match.group(6))) next_pos = match.end() rex.append('$') return re.compile(''.join(rex)) def _as_list(l): if isinstance(l, basestring): return l.split() return l def glob(dir_path, includes = '**/*', excludes = default_excludes, entry_type = FILE, prune_dirs = prune_dirs, max_depth = 25): include_filter = [ant_pattern_to_re(p) for p in _as_list(includes)] exclude_filter = [ant_pattern_to_re(p) for p in _as_list(excludes)] prune_dirs = [p.replace('/',os.path.sep) for p in _as_list(prune_dirs)] dir_path = dir_path.replace('/',os.path.sep) entry_type_filter = entry_type def is_pruned_dir(dir_name): for pattern in prune_dirs: if fnmatch.fnmatch(dir_name, pattern): return True return False def apply_filter(full_path, filter_rexs): """Return True if at least one of the filter regular expression match full_path.""" for rex in filter_rexs: if rex.match(full_path): return True return False def glob_impl(root_dir_path): child_dirs = [root_dir_path] while child_dirs: dir_path = child_dirs.pop() for entry in listdir(dir_path): full_path = os.path.join(dir_path, entry) ## print 'Testing:', full_path, is_dir = os.path.isdir(full_path) if is_dir and not is_pruned_dir(entry): # explore child directory ? 
## print '===> marked for recursion', child_dirs.append(full_path) included = apply_filter(full_path, include_filter) rejected = apply_filter(full_path, exclude_filter) if not included or rejected: # do not include entry ? ## print '=> not included or rejected' continue link = os.path.islink(full_path) is_file = os.path.isfile(full_path) if not is_file and not is_dir: ## print '=> unknown entry type' continue if link: entry_type = is_file and FILE_LINK or DIR_LINK else: entry_type = is_file and FILE or DIR ## print '=> type: %d' % entry_type, if (entry_type & entry_type_filter) != 0: ## print ' => KEEP' yield os.path.join(dir_path, entry) ## else: ## print ' => TYPE REJECTED' return list(glob_impl(dir_path)) if __name__ == "__main__": import unittest class AntPatternToRETest(unittest.TestCase): ## def test_conversion(self): ## self.assertEqual('^somepath$', ant_pattern_to_re('somepath').pattern) def test_matching(self): test_cases = [ ('path', ['path'], ['somepath', 'pathsuffix', '/path', '/path']), ('*.py', ['source.py', 'source.ext.py', '.py'], ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c']), ('**/path', ['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'], ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath']), ('path/**', ['path/a', 'path/path/a', 'path//'], ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a']), ('/**/path', ['/path', '/a/path', '/a/b/path/path', '/path/path'], ['path', 'path/', 'a/path', '/pathsuffix', '/somepath']), ('a/b', ['a/b'], ['somea/b', 'a/bsuffix', 'a/b/c']), ('**/*.py', ['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'], ['script.pyc', 'script.pyo', 'a.py/b']), ('src/**/*.py', ['src/a.py', 'src/dir/a.py'], ['a/src/a.py', '/src/a.py']), ] for ant_pattern, accepted_matches, rejected_matches in list(test_cases): def local_path(paths): return [ p.replace('/',os.path.sep) for p in paths ] test_cases.append((ant_pattern, local_path(accepted_matches), local_path(rejected_matches))) for ant_pattern, accepted_matches, rejected_matches in test_cases: rex = ant_pattern_to_re(ant_pattern) print('ant_pattern:', ant_pattern, ' => ', rex.pattern) for accepted_match in accepted_matches: print('Accepted?:', accepted_match) self.assertTrue(rex.match(accepted_match) is not None) for rejected_match in rejected_matches: print('Rejected?:', rejected_match) self.assertTrue(rex.match(rejected_match) is None) unittest.main()
gpl-3.0
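For readers of the antglob.py record above, a minimal usage sketch of its glob() and ant_pattern_to_re() helpers; the module name, the 'src' directory and the include pattern are assumptions, and the file as written targets Python 2 (dircache, basestring):

import antglob  # assumed import name for the file above

# Collect every .py and .h file under src/, pruning VCS directories and
# applying the module's default_excludes.
sources = antglob.glob('src', includes='**/*.py **/*.h', entry_type=antglob.FILE)

# Patterns can also be compiled and matched directly.
rex = antglob.ant_pattern_to_re('**/*.py')
assert rex.match('pkg/module.py') is not None
assert rex.match('module.pyc') is None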
crr0004/taiga-back
taiga/users/services.py
2
3912
# Copyright (C) 2014 Andrey Antukh <[email protected]> # Copyright (C) 2014 Jesús Espino <[email protected]> # Copyright (C) 2014 David Barragán <[email protected]> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ This model contains a domain logic for users application. """ from django.apps import apps from django.db.models import Q from django.conf import settings from django.utils.translation import ugettext as _ from easy_thumbnails.files import get_thumbnailer from easy_thumbnails.exceptions import InvalidImageFormatError from taiga.base import exceptions as exc from taiga.base.utils.urls import get_absolute_url from .gravatar import get_gravatar_url def get_and_validate_user(*, username:str, password:str) -> bool: """ Check if user with username/email exists and specified password matchs well with existing user password. if user is valid, user is returned else, corresponding exception is raised. """ user_model = apps.get_model("users", "User") qs = user_model.objects.filter(Q(username=username) | Q(email=username)) if len(qs) == 0: raise exc.WrongArguments(_("Username or password does not matches user.")) user = qs[0] if not user.check_password(password): raise exc.WrongArguments(_("Username or password does not matches user.")) return user def get_photo_url(photo): """Get a photo absolute url and the photo automatically cropped.""" try: url = get_thumbnailer(photo)['avatar'].url return get_absolute_url(url) except InvalidImageFormatError as e: return None def get_photo_or_gravatar_url(user): """Get the user's photo/gravatar url.""" if user: return get_photo_url(user.photo) if user.photo else get_gravatar_url(user.email) return "" def get_big_photo_url(photo): """Get a big photo absolute url and the photo automatically cropped.""" try: url = get_thumbnailer(photo)['big-avatar'].url return get_absolute_url(url) except InvalidImageFormatError as e: return None def get_big_photo_or_gravatar_url(user): """Get the user's big photo/gravatar url.""" if not user: return "" if user.photo: return get_big_photo_url(user.photo) else: return get_gravatar_url(user.email, size=settings.DEFAULT_BIG_AVATAR_SIZE) def get_stats_for_user(user): """Get the user stats""" project_ids = user.memberships.values_list("project__id", flat=True).distinct() total_num_projects = project_ids.count() roles = [_(r) for r in user.memberships.values_list("role__name", flat=True)] roles = list(set(roles)) User = apps.get_model('users', 'User') total_num_contacts = User.objects.filter(memberships__project__id__in=project_ids)\ .exclude(id=user.id)\ .distinct()\ .count() UserStory = apps.get_model('userstories', 'UserStory') total_num_closed_userstories = UserStory.objects.filter(is_closed=True, assigned_to=user).count() project_stats = { 'total_num_projects': total_num_projects, 'roles': roles, 'total_num_contacts': total_num_contacts, 'total_num_closed_userstories': total_num_closed_userstories, } return project_stats
agpl-3.0
huihoo/reader
vendor/appdotnet.py
19
9191
import json import requests # To add # - Identity Delegation # - Streams (in dev by app.net) # - Filters (in dev by app.net) class Appdotnet: ''' Once access has been given, you don't have to pass through the client_id, client_secret, redirect_uri, or scope. These are just to get the authentication token. Once authenticated, you can initialise appdotnet with only the access token: ie api = Appdotnet(access_token='<insert token here>') ''' def __init__(self, client_id=None, client_secret=None, redirect_uri=None, scope=None, access_token=None): #for server authentication flow self.client_id = client_id self.client_secret = client_secret self.redirect_uri = redirect_uri self.scope = scope self.access_token = access_token self.api_anchor = "alpha.app.net" #for when the versions change #anchors currently different self.public_api_anchor = "alpha-api.app.net" #scopes provided by app.net API self.allowed_scopes = ['stream', 'email', 'write_post', 'follow', 'messages','export'] def generateAuthUrl(self): url = "https://" + self.api_anchor + "/oauth/authenticate?client_id="+\ self.client_id + "&response_type=code&adnview=appstore&redirect_uri=" +\ self.redirect_uri + "&scope=" for scope in self.scope: if scope in self.allowed_scopes: url += scope + " " return url def getAuthResponse(self, code): #generate POST request url = "https://alpha.app.net/oauth/access_token" post_data = {'client_id':self.client_id, 'client_secret':self.client_secret, 'grant_type':'authorization_code', 'redirect_uri':self.redirect_uri, 'code':code} r = requests.post(url,data=post_data) return r.text ''' API Calls ''' #GET REQUESTS def getRequest(self, url, getParameters=None): if not getParameters: getParameters = {} #access token url = url + "?access_token=" + self.access_token #if there are any extra get parameters aside from the access_token, append to the url if getParameters != {}: for key, value in getParameters.iteritems(): if not value: continue url = url + "&" + key + "=" + unicode(value) print url r = requests.get(url) if r.status_code == requests.codes.ok: return r.text else: j = json.loads(r.text) resp = {'error_code': r.status_code, 'message' : j['error']['message']} return json.dumps(resp) def getUser(self, user_id): url = "https://%s/stream/0/users/%s" % (self.public_api_anchor, user_id) return self.getRequest(url) def getUserPosts(self, user_id): url = "https://%s/stream/0/users/%s/posts" % (self.public_api_anchor, user_id) return self.getRequest(url) def getUserStars(self, user_id): url = "https://%s/stream/0/users/%s/stars" % (self.public_api_anchor, user_id) return self.getRequest(url) def getGlobalStream(self): url = "https://%s/stream/0/posts/stream/global" % self.public_api_anchor return self.getRequest(url) def getUserStream(self): url = "https://%s/stream/0/posts/stream" % self.public_api_anchor return self.getRequest(url) def getUserMentions(self, user_id): url = "https://%s/stream/0/users/%s/mentions" % (self.public_api_anchor,user_id) return self.getRequest(url) def getPost(self, post_id): url = "https://%s/stream/0/posts/%s" % (self.public_api_anchor,post_id) return self.getRequest(url) def getReposters(self, post_id): url ="https://%s/stream/0/posts/%s/reposters" % (self.public_api_anchor,post_id) return self.getRequest(url) def getStars(self, post_id): url ="https://%s/stream/0/posts/%s/stars" % (self.public_api_anchor,post_id) return self.getRequest(url) def getPostReplies(self, post_id): url = "https://%s/stream/0/posts/%s/replies" % (self.public_api_anchor,post_id) return self.getRequest(url) 
def getPostsByTag(self, tag): url = "https://%s/stream/0/posts/tag/%s" % (self.public_api_anchor, tag) return self.getRequest(url) def getUserFollowing(self, user_id, since_id=None, before_id=None): url = "https://%s/stream/0/users/%s/following" % (self.public_api_anchor, user_id) return self.getRequest(url, getParameters={ 'since_id': since_id, 'before_id': before_id, }) def getUserFollowingIds(self, user_id, since_id=None, before_id=None): url = "https://%s/stream/0/users/%s/following/ids" % (self.public_api_anchor, user_id) return self.getRequest(url, getParameters={ 'since_id': since_id, 'before_id': before_id, }) def getUserFollowers(self, user_id): url = "https://%s/stream/0/users/%s/followers" % (self.public_api_anchor, user_id) return self.getRequest(url) def getMutedUsers(self): url = "https://%s/stream/0/users/me/muted" % self.public_api_anchor return self.getRequest(url) def searchUsers(self,q): url = "https://%s/stream/0/users/search" % (self.public_api_anchor) return self.getRequest(url,getParameters={'q':q}) def getCurrentToken(self): url = "https://%s/stream/0/token" % self.public_api_anchor return self.getRequest(url) #POST REQUESTS def postRequest(self, url, data=None, headers=None): if not data: data = {} if not headers: headers = {} headers['Authorization'] = 'Bearer %s' % self.access_token url = url r = requests.post(url,data=json.dumps(data),headers=headers) if r.status_code == requests.codes.ok: return r.text else: try: j = json.loads(r.text) resp = {'error_code': r.status_code, 'message' : j['error']['message']} return resp except: #generic error print r.text return "{'error':'There was an error'}" def followUser(self,user_id): url = "https://%s/stream/0/users/%s/follow" % (self.public_api_anchor, user_id) return self.postRequest(url) def repostPost(self,post_id): url = "https://%s/stream/0/posts/%s/repost" % (self.public_api_anchor, post_id) return self.postRequest(url) def starPost(self,post_id): url = "https://%s/stream/0/posts/%s/star" % (self.public_api_anchor, post_id) return self.postRequest(url) def muteUser(self,user_id): url = "https://%s/stream/0/users/%s/mute" % (self.public_api_anchor, user_id) return self.postRequest(url) #requires: text #optional: reply_to, annotations, links def createPost(self, text, reply_to = None, annotations=None, links=None): url = "https://%s/stream/0/posts" % self.public_api_anchor if annotations != None: url = url + "?include_annotations=1" data = {'text':text} if reply_to != None: data['reply_to'] = reply_to if annotations != None: data['annotations'] = annotations if links != None: data['links'] = links return self.postRequest(url,data,headers={'content-type':'application/json'}) #DELETE request def deleteRequest(self, url): url = url + "?access_token=" + self.access_token r = requests.delete(url) if r.status_code == requests.codes.ok: return r.text else: try: j = json.loads(r.text) resp = {'error_code': r.status_code, 'message' : j['error']['message']} return resp except: #generic error print r.text return "{'error':'There was an error'}" def deletePost(self, post_id): url = "https://%s/stream/0/posts/%s" % (self.public_api_anchor,post_id) return self.deleteRequest(url) def unrepostPost(self, post_id): url = "https://%s/stream/0/posts/%s/repost" % (self.public_api_anchor,post_id) return self.deleteRequest(url) def unstarPost(self, post_id): url = "https://%s/stream/0/posts/%s/star" % (self.public_api_anchor,post_id) return self.deleteRequest(url) def unfollowUser(self, user_id): url = "https://%s/stream/0/users/%s/follow" 
% (self.public_api_anchor,user_id) return self.deleteRequest(url) def unmuteUser(self, user_id): url = "https://%s/stream/0/users/%s/mute" % (self.public_api_anchor,user_id) return self.deleteRequest(url)
mit
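A brief usage sketch of the Appdotnet wrapper above, following its own docstring; the import path and the placeholder token are assumptions, and only methods defined in the class are called:

from appdotnet import Appdotnet  # assumed module name for vendor/appdotnet.py

# Initialise with an already-obtained access token, as the class docstring suggests.
api = Appdotnet(access_token='<insert token here>')

# GET helpers return the raw JSON text of the response.
stream = api.getGlobalStream()
me = api.getUser('me')

# POST helpers send an authenticated request.
api.createPost('Hello from the wrapper', reply_to=None)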
fga-verival/2017-1Grupo2
backend/game/tests/acceptance/test_13.py
1
1980
# -*- coding: utf-8 -*- from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException import unittest, time, re class TC13(unittest.TestCase): def setUp(self): self.driver = webdriver.Firefox() self.driver.implicitly_wait(30) self.base_url = "http://localhost:8000" self.verificationErrors = [] self.accept_next_alert = True def test_t_c13(self): driver = self.driver driver.get(self.base_url + "/admin/login/?next=/admin/") driver.find_element_by_id("id_password").clear() driver.find_element_by_id("id_password").send_keys("qwer1234") driver.find_element_by_id("id_password").clear() driver.find_element_by_id("id_password").send_keys("qwer1234") driver.find_element_by_css_selector("input.btn.btn-info").click() driver.find_element_by_css_selector("input.btn.btn-info").click() def is_element_present(self, how, what): try: self.driver.find_element(by=how, value=what) except NoSuchElementException as e: return False return True def is_alert_present(self): try: self.driver.switch_to_alert() except NoAlertPresentException as e: return False return True def close_alert_and_get_its_text(self): try: alert = self.driver.switch_to_alert() alert_text = alert.text if self.accept_next_alert: alert.accept() else: alert.dismiss() return alert_text finally: self.accept_next_alert = True def tearDown(self): self.driver.quit() self.assertEqual([], self.verificationErrors) if __name__ == "__main__": unittest.main()
gpl-3.0
giggsey/SickRage
lib/github/Permissions.py
74
3027
# -*- coding: utf-8 -*- # ########################## Copyrights and license ############################ # # # Copyright 2012 Vincent Jacques <[email protected]> # # Copyright 2012 Zearin <[email protected]> # # Copyright 2013 AKFish <[email protected]> # # Copyright 2013 Vincent Jacques <[email protected]> # # # # This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # # ############################################################################## import github.GithubObject class Permissions(github.GithubObject.NonCompletableGithubObject): """ This class represents Permissionss as returned for example by http://developer.github.com/v3/todo """ @property def admin(self): """ :type: bool """ return self._admin.value @property def pull(self): """ :type: bool """ return self._pull.value @property def push(self): """ :type: bool """ return self._push.value def _initAttributes(self): self._admin = github.GithubObject.NotSet self._pull = github.GithubObject.NotSet self._push = github.GithubObject.NotSet def _useAttributes(self, attributes): if "admin" in attributes: # pragma no branch self._admin = self._makeBoolAttribute(attributes["admin"]) if "pull" in attributes: # pragma no branch self._pull = self._makeBoolAttribute(attributes["pull"]) if "push" in attributes: # pragma no branch self._push = self._makeBoolAttribute(attributes["push"])
gpl-3.0
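The Permissions class above is normally reached through other PyGithub objects rather than constructed directly; a hedged sketch (the token and repository name are placeholders, and repo.permissions is assumed to be populated from the API response):

from github import Github

gh = Github('<token>')
repo = gh.get_repo('jacquev6/PyGithub')

perms = repo.permissions                    # a github.Permissions.Permissions instance
print(perms.admin, perms.push, perms.pull)  # three booleans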
Sh4kE/ofm_helper
core/tests/unit/views/ofm_views/test_ofm_finances_view.py
2
4895
import json from django.core.urlresolvers import reverse from django.test import TestCase from core.factories.core_factories import MatchdayFactory, FinanceFactory from users.models import OFMUser class OFMFinancesViewTestCase(TestCase): def setUp(self): self.matchday = MatchdayFactory.create() self.next_matchday = MatchdayFactory.create(number=1) self.user1 = OFMUser.objects.create_user( username='alice', email='[email protected]', password='alice', ofm_username='alice', ofm_password='alice' ) self.finances = FinanceFactory.create(user=self.user1, matchday=self.matchday) self.next_finances = FinanceFactory.create( user=self.user1, matchday=self.next_matchday, balance=2000, income_visitors_league=200, expenses_player_salaries=200 ) self.client.login(username='alice', password='alice') def test_user_can_see_his_finances(self): response = self.client.get(reverse('core:ofm:finance_overview')) self.assertEqual(response.status_code, 200) self.assertTrue('matchdays' in response.context_data) def test_user_can_choose_between_matchdays(self): response = self.client.get(reverse('core:ofm:finance_overview')) self.assertEqual(response.status_code, 200) self.assertEqual(self.next_matchday, response.context_data['matchdays'][0]) self.assertEqual(self.matchday, response.context_data['matchdays'][1]) def test_user_can_see_his_latest_finances_when_given_no_matchday(self): response = self.client.get(reverse('core:ofm:finances_json')) self.assertEqual(response.status_code, 200) returned_json_data = json.loads(response.content.decode('utf-8')) self.assertEqual(len(returned_json_data), 1) self.assertEqual(returned_json_data[0]['account_balance'], 2000) self.assertEqual(returned_json_data[0]['income_visitors_league'], 200) self.assertEqual(returned_json_data[0]['expenses_player_salaries'], -200) def test_user_can_see_his_finances_diff_when_given_both_matchdays(self): third_matchday = MatchdayFactory.create(number=self.matchday.number + 2) FinanceFactory.create( user=self.user1, matchday=third_matchday, balance=2500, income_visitors_league=250, income_sponsoring=250, expenses_player_salaries=250, expenses_youth=100 ) response = self.client.get(reverse('core:ofm:finances_json'), {'newer_matchday_season': third_matchday.season.number, 'newer_matchday': third_matchday.number, 'older_matchday_season': self.matchday.season.number, 'older_matchday': self.matchday.number }) self.assertEqual(response.status_code, 200) returned_json_data = json.loads(response.content.decode('utf-8')) self.assertEqual(len(returned_json_data), 1) self.assertEqual(returned_json_data[0]['account_balance'], 2500) self.assertEqual(returned_json_data[0]['balance'], 150) self.assertEqual(returned_json_data[0]['sum_income'], 400) self.assertEqual(returned_json_data[0]['sum_expenses'], -250) self.assertEqual(returned_json_data[0]['income_visitors_league'], 150) self.assertEqual(returned_json_data[0]['expenses_player_salaries'], -150) def test_user_can_see_his_finances_diff_when_given_only_newer_matchday(self): third_matchday = MatchdayFactory.create(number=self.matchday.number + 2) FinanceFactory.create( user=self.user1, matchday=third_matchday, balance=2500, income_visitors_league=250, expenses_player_salaries=250 ) response = self.client.get(reverse('core:ofm:finances_json'), {'newer_matchday_season': third_matchday.season.number, 'newer_matchday': third_matchday.number }) self.assertEqual(response.status_code, 200) returned_json_data = json.loads(response.content.decode('utf-8')) self.assertEqual(len(returned_json_data), 1) 
self.assertEqual(returned_json_data[0]['account_balance'], 2500) self.assertEqual(returned_json_data[0]['balance'], 0) self.assertEqual(returned_json_data[0]['sum_income'], 250) self.assertEqual(returned_json_data[0]['sum_expenses'], -250) self.assertEqual(returned_json_data[0]['income_visitors_league'], 250) self.assertEqual(returned_json_data[0]['expenses_player_salaries'], -250)
agpl-3.0
whip112/Whip112
vendor/packages/lockfile/symlinklockfile.py
487
2613
from __future__ import absolute_import import time import os from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, AlreadyLocked) class SymlinkLockFile(LockBase): """Lock access to a file using symlink(2).""" def __init__(self, path, threaded=True, timeout=None): # super(SymlinkLockFile).__init(...) LockBase.__init__(self, path, threaded, timeout) # split it back! self.unique_name = os.path.split(self.unique_name)[1] def acquire(self, timeout=None): # Hopefully unnecessary for symlink. #try: # open(self.unique_name, "wb").close() #except IOError: # raise LockFailed("failed to create %s" % self.unique_name) timeout = timeout is not None and timeout or self.timeout end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout while True: # Try and create a symbolic link to it. try: os.symlink(self.unique_name, self.lock_file) except OSError: # Link creation failed. Maybe we've double-locked? if self.i_am_locking(): # Linked to out unique name. Proceed. return else: # Otherwise the lock creation failed. if timeout is not None and time.time() > end_time: if timeout > 0: raise LockTimeout("Timeout waiting to acquire" " lock for %s" % self.path) else: raise AlreadyLocked("%s is already locked" % self.path) time.sleep(timeout/10 if timeout is not None else 0.1) else: # Link creation succeeded. We're good to go. return def release(self): if not self.is_locked(): raise NotLocked("%s is not locked" % self.path) elif not self.i_am_locking(): raise NotMyLock("%s is locked, but not by me" % self.path) os.unlink(self.lock_file) def is_locked(self): return os.path.islink(self.lock_file) def i_am_locking(self): return os.path.islink(self.lock_file) and \ os.readlink(self.lock_file) == self.unique_name def break_lock(self): if os.path.islink(self.lock_file): # exists && link os.unlink(self.lock_file)
mpl-2.0
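An illustrative driver for the SymlinkLockFile above; the lock path is a placeholder, and only methods defined in the class (acquire, release, i_am_locking) are used:

from lockfile.symlinklockfile import SymlinkLockFile

lock = SymlinkLockFile('/tmp/shared-resource.txt', timeout=10)
lock.acquire()                 # waits up to 10 seconds, then raises LockTimeout
try:
    pass                       # work with the protected resource here
finally:
    if lock.i_am_locking():
        lock.release()         # removes the symlink created by acquire()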
cedk/odoo
addons/website_sale_options/controllers/main.py
236
3610
# -*- coding: utf-8 -*- from openerp import SUPERUSER_ID from openerp.addons.web import http from openerp.addons.web.http import request from openerp.addons.website_sale.controllers.main import website_sale class website_sale_options(website_sale): @http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True) def product(self, product, category='', search='', **kwargs): r = super(website_sale_options, self).product(product, category, search, **kwargs) cr, uid, context, pool = request.cr, request.uid, request.context, request.registry template_obj = pool['product.template'] optional_product_ids = [] for p in product.optional_product_ids: ctx = dict(context, active_id=p.id) optional_product_ids.append(template_obj.browse(cr, uid, p.id, context=ctx)) r.qcontext['optional_product_ids'] = optional_product_ids return r @http.route(['/shop/cart/update_option'], type='http', auth="public", methods=['POST'], website=True, multilang=False) def cart_options_update_json(self, product_id, add_qty=1, set_qty=0, goto_shop=None, lang=None, **kw): cr, uid, context, pool = request.cr, request.uid, request.context, request.registry if lang: context = dict(context, lang=lang) request.website = request.website.with_context(context) order = request.website.sale_get_order(force_create=1) product = pool['product.product'].browse(cr, uid, int(product_id), context=context) option_ids = [p.id for tmpl in product.optional_product_ids for p in tmpl.product_variant_ids] optional_product_ids = [] for k, v in kw.items(): if "optional-product-" in k and int(kw.get(k.replace("product", "add"))) and int(v) in option_ids: optional_product_ids.append(int(v)) value = {} if add_qty or set_qty: value = order._cart_update(product_id=int(product_id), add_qty=int(add_qty), set_qty=int(set_qty), optional_product_ids=optional_product_ids) # options have all time the same quantity for option_id in optional_product_ids: order._cart_update(product_id=option_id, set_qty=value.get('quantity'), linked_line_id=value.get('line_id')) return str(order.cart_quantity) @http.route(['/shop/modal'], type='json', auth="public", methods=['POST'], website=True) def modal(self, product_id, **kw): cr, uid, context, pool = request.cr, request.uid, request.context, request.registry pricelist = self.get_pricelist() if not context.get('pricelist'): context['pricelist'] = int(pricelist) website_context = kw.get('kwargs', {}).get('context', {}) context = dict(context or {}, **website_context) from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context) to_currency = pricelist.currency_id compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context) product = pool['product.product'].browse(cr, uid, int(product_id), context=context) request.website = request.website.with_context(context) return request.website._render("website_sale_options.modal", { 'product': product, 'compute_currency': compute_currency, 'get_attribute_value_ids': self.get_attribute_value_ids, })
agpl-3.0
andreimacavei/coala
coalib/tests/bearlib/abstractions/SectionCreatableTest.py
2
2504
import sys sys.path.insert(0, ".") import unittest from coalib.bearlib.abstractions.SectionCreatable import SectionCreatable from coalib.settings.Section import Section, Setting class TestObject(SectionCreatable): def __init__(self, setting_one: int, raw_setting, setting_two: bool=False, setting_three: list=[1, 2], opt_raw_set=5): SectionCreatable.__init__(self) assert isinstance(setting_one, int) assert isinstance(raw_setting, Setting) assert isinstance(setting_two, bool) assert isinstance(setting_three, list) assert isinstance(opt_raw_set, Setting) or isinstance(opt_raw_set, int) self.setting_one = setting_one self.raw_setting = raw_setting self.setting_two = setting_two self.setting_three = setting_three self.opt_raw_set = opt_raw_set class SectionCreatableTest(unittest.TestCase): def test_api(self): uut = SectionCreatable() self.assertRaises(TypeError, uut.from_section, 5) self.assertEqual(uut.get_non_optional_settings(), {}) self.assertEqual(uut.get_optional_settings(), {}) def test_needed_settings(self): self.assertEqual(sorted(list(TestObject.get_non_optional_settings())), sorted(["setting_one", "raw_setting"])) self.assertEqual( sorted(list(TestObject.get_optional_settings())), sorted(["setting_two", "setting_three", "opt_raw_set"])) def test_from_section(self): section = Section("name") section.append(Setting("setting_one", " 5")) section.append(Setting("raw_setting", " 5s")) uut = TestObject.from_section(section) self.assertEqual(uut.setting_one, 5) self.assertEqual(str(uut.raw_setting), "5s") self.assertEqual(uut.setting_two, False) self.assertEqual(uut.setting_three, [1, 2]) self.assertEqual(str(uut.opt_raw_set), "5") section.append(Setting("setting_three", "2, 4")) section.append(Setting("opt_raw_set", "tst ,")) uut = TestObject.from_section(section) self.assertEqual(uut.setting_one, 5) self.assertEqual(str(uut.raw_setting), "5s") self.assertEqual(uut.setting_two, False) self.assertEqual(uut.setting_three, ["2", "4"]) self.assertEqual(str(uut.opt_raw_set), "tst ,") if __name__ == '__main__': unittest.main(verbosity=2)
agpl-3.0
kingsamchen/Eureka
crack-data-structures-and-algorithms/leetcode/populating_next_right_pointers_in_each_node_II_q117.py
1
1821
""" # Definition for a Node. class Node(object): def __init__(self, val=0, left=None, right=None, next=None): self.val = val self.left = left self.right = right self.next = next """ # 核心思路 # 递归处理,因为题目已经说过递归调用栈不算extra space # 对于每一个节点,首先找到这个节点的不为空的,最靠右的孩子节点,作为下一层连接操作的左端 # 然后在这一层往右扫描,找到第一个有孩子结点的结点,他的最靠左的孩子节点作为连接端的右端 # 如果上一个操作能找到左右两端,则进行连接 # 然后递归处理当前节点的子结点情况。注意:一定要先处理右孩子,再处理左孩子,因为前面水平扫描是自左向右。 class Solution(object): def connect(self, root): """ :type root: Node :rtype: Node """ if not root: return root # connect children, even root.right == None is well taken. if root.left: root.left.next = root.right # locate the right part node of left children. lc_right = root.right if root.right else root.left # because a node may have no child, therefore # scan from left to right, stop at the first node who has at least one child. pn = root.next while pn and not (pn.left or pn.right): pn = pn.next # connect if may. if pn and lc_right: next_left = pn.left if pn.left else pn.right lc_right.next = next_left # recursively. # NOTE: must handle right part ahead of left part. # because our horizontal scan is from left to right. self.connect(root.right) self.connect(root.left) return root
mit
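A small driver for the Solution above; the Node class is copied from the commented-out definition in the file's docstring, and the tree shape is just an example:

class Node(object):
    def __init__(self, val=0, left=None, right=None, next=None):
        self.val, self.left, self.right, self.next = val, left, right, next

# Tree:      1
#           / \
#          2   3
#           \    \
#            4    5
root = Node(1, Node(2, None, Node(4)), Node(3, None, Node(5)))
Solution().connect(root)

assert root.left.next is root.right                  # 2 -> 3
assert root.left.right.next is root.right.right      # 4 -> 5
assert root.right.right.next is None                 # 5 is the last node on its level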
Changaco/oh-mainline
vendor/packages/Django/django/contrib/admin/templatetags/admin_modify.py
101
2428
from django import template register = template.Library() @register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True) def prepopulated_fields_js(context): """ Creates a list of prepopulated_fields that should render Javascript for the prepopulated fields for both the admin form and inlines. """ prepopulated_fields = [] if context['add'] and 'adminform' in context: prepopulated_fields.extend(context['adminform'].prepopulated_fields) if 'inline_admin_formsets' in context: for inline_admin_formset in context['inline_admin_formsets']: for inline_admin_form in inline_admin_formset: if inline_admin_form.original is None: prepopulated_fields.extend(inline_admin_form.prepopulated_fields) context.update({'prepopulated_fields': prepopulated_fields}) return context @register.inclusion_tag('admin/submit_line.html', takes_context=True) def submit_row(context): """ Displays the row of buttons for delete and save. """ opts = context['opts'] change = context['change'] is_popup = context['is_popup'] save_as = context['save_as'] ctx = { 'opts': opts, 'onclick_attrib': (opts.get_ordered_objects() and change and 'onclick="submitOrderForm();"' or ''), 'show_delete_link': (not is_popup and context['has_delete_permission'] and change and context.get('show_delete', True)), 'show_save_as_new': not is_popup and change and save_as, 'show_save_and_add_another': context['has_add_permission'] and not is_popup and (not save_as or context['add']), 'show_save_and_continue': not is_popup and context['has_change_permission'], 'is_popup': is_popup, 'show_save': True } if context.get('original') is not None: ctx['original'] = context['original'] return ctx @register.filter def cell_count(inline_admin_form): """Returns the number of cells used in a tabular inline""" count = 1 # Hidden cell with hidden 'id' field for fieldset in inline_admin_form: # Loop through all the fields (one per cell) for line in fieldset: for field in line: count += 1 if inline_admin_form.formset.can_delete: # Delete checkbox count += 1 return count
agpl-3.0
codeforamerica/skillcamp
ENV/lib/python2.7/site-packages/psycopg2/tests/test_extras_dictcursor.py
62
17404
#!/usr/bin/env python # # extras_dictcursor - test if DictCursor extension class works # # Copyright (C) 2004-2010 Federico Di Gregorio <[email protected]> # # psycopg2 is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # psycopg2 is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. import time from datetime import timedelta import psycopg2 import psycopg2.extras from testutils import unittest, ConnectingTestCase, skip_before_postgres from testutils import skip_if_no_namedtuple class ExtrasDictCursorTests(ConnectingTestCase): """Test if DictCursor extension class works.""" def setUp(self): ConnectingTestCase.setUp(self) curs = self.conn.cursor() curs.execute("CREATE TEMPORARY TABLE ExtrasDictCursorTests (foo text)") curs.execute("INSERT INTO ExtrasDictCursorTests VALUES ('bar')") self.conn.commit() def testDictConnCursorArgs(self): self.conn.close() self.conn = self.connect(connection_factory=psycopg2.extras.DictConnection) cur = self.conn.cursor() self.assert_(isinstance(cur, psycopg2.extras.DictCursor)) self.assertEqual(cur.name, None) # overridable cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.NamedTupleCursor) self.assertEqual(cur.name, 'foo') self.assert_(isinstance(cur, psycopg2.extras.NamedTupleCursor)) def testDictCursorWithPlainCursorFetchOne(self): self._testWithPlainCursor(lambda curs: curs.fetchone()) def testDictCursorWithPlainCursorFetchMany(self): self._testWithPlainCursor(lambda curs: curs.fetchmany(100)[0]) def testDictCursorWithPlainCursorFetchManyNoarg(self): self._testWithPlainCursor(lambda curs: curs.fetchmany()[0]) def testDictCursorWithPlainCursorFetchAll(self): self._testWithPlainCursor(lambda curs: curs.fetchall()[0]) def testDictCursorWithPlainCursorIter(self): def getter(curs): for row in curs: return row self._testWithPlainCursor(getter) def testUpdateRow(self): row = self._testWithPlainCursor(lambda curs: curs.fetchone()) row['foo'] = 'qux' self.failUnless(row['foo'] == 'qux') self.failUnless(row[0] == 'qux') @skip_before_postgres(8, 0) def testDictCursorWithPlainCursorIterRowNumber(self): curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) self._testIterRowNumber(curs) def _testWithPlainCursor(self, getter): curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) curs.execute("SELECT * FROM ExtrasDictCursorTests") row = getter(curs) self.failUnless(row['foo'] == 'bar') self.failUnless(row[0] == 'bar') return row def testDictCursorWithPlainCursorRealFetchOne(self): self._testWithPlainCursorReal(lambda curs: curs.fetchone()) def testDictCursorWithPlainCursorRealFetchMany(self): self._testWithPlainCursorReal(lambda curs: curs.fetchmany(100)[0]) def testDictCursorWithPlainCursorRealFetchManyNoarg(self): self._testWithPlainCursorReal(lambda curs: curs.fetchmany()[0]) def testDictCursorWithPlainCursorRealFetchAll(self): self._testWithPlainCursorReal(lambda curs: curs.fetchall()[0]) def testDictCursorWithPlainCursorRealIter(self): def getter(curs): for row in curs: return row self._testWithPlainCursorReal(getter) @skip_before_postgres(8, 0) def testDictCursorWithPlainCursorRealIterRowNumber(self): curs = 
self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) self._testIterRowNumber(curs) def _testWithPlainCursorReal(self, getter): curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) curs.execute("SELECT * FROM ExtrasDictCursorTests") row = getter(curs) self.failUnless(row['foo'] == 'bar') def testDictCursorWithNamedCursorFetchOne(self): self._testWithNamedCursor(lambda curs: curs.fetchone()) def testDictCursorWithNamedCursorFetchMany(self): self._testWithNamedCursor(lambda curs: curs.fetchmany(100)[0]) def testDictCursorWithNamedCursorFetchManyNoarg(self): self._testWithNamedCursor(lambda curs: curs.fetchmany()[0]) def testDictCursorWithNamedCursorFetchAll(self): self._testWithNamedCursor(lambda curs: curs.fetchall()[0]) def testDictCursorWithNamedCursorIter(self): def getter(curs): for row in curs: return row self._testWithNamedCursor(getter) @skip_before_postgres(8, 2) def testDictCursorWithNamedCursorNotGreedy(self): curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor) self._testNamedCursorNotGreedy(curs) @skip_before_postgres(8, 0) def testDictCursorWithNamedCursorIterRowNumber(self): curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor) self._testIterRowNumber(curs) def _testWithNamedCursor(self, getter): curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.DictCursor) curs.execute("SELECT * FROM ExtrasDictCursorTests") row = getter(curs) self.failUnless(row['foo'] == 'bar') self.failUnless(row[0] == 'bar') def testDictCursorRealWithNamedCursorFetchOne(self): self._testWithNamedCursorReal(lambda curs: curs.fetchone()) def testDictCursorRealWithNamedCursorFetchMany(self): self._testWithNamedCursorReal(lambda curs: curs.fetchmany(100)[0]) def testDictCursorRealWithNamedCursorFetchManyNoarg(self): self._testWithNamedCursorReal(lambda curs: curs.fetchmany()[0]) def testDictCursorRealWithNamedCursorFetchAll(self): self._testWithNamedCursorReal(lambda curs: curs.fetchall()[0]) def testDictCursorRealWithNamedCursorIter(self): def getter(curs): for row in curs: return row self._testWithNamedCursorReal(getter) @skip_before_postgres(8, 2) def testDictCursorRealWithNamedCursorNotGreedy(self): curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor) self._testNamedCursorNotGreedy(curs) @skip_before_postgres(8, 0) def testDictCursorRealWithNamedCursorIterRowNumber(self): curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor) self._testIterRowNumber(curs) def _testWithNamedCursorReal(self, getter): curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.RealDictCursor) curs.execute("SELECT * FROM ExtrasDictCursorTests") row = getter(curs) self.failUnless(row['foo'] == 'bar') def _testNamedCursorNotGreedy(self, curs): curs.itersize = 2 curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""") recs = [] for t in curs: time.sleep(0.01) recs.append(t) # check that the dataset was not fetched in a single gulp self.assert_(recs[1]['ts'] - recs[0]['ts'] < timedelta(seconds=0.005)) self.assert_(recs[2]['ts'] - recs[1]['ts'] > timedelta(seconds=0.0099)) def _testIterRowNumber(self, curs): # Only checking for dataset < itersize: # see CursorTests.test_iter_named_cursor_rownumber curs.itersize = 20 curs.execute("""select * from generate_series(1,10)""") for i, r in enumerate(curs): self.assertEqual(i + 1, curs.rownumber) def testPickleDictRow(self): import pickle curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) curs.execute("select 10 as a, 20 
as b") r = curs.fetchone() d = pickle.dumps(r) r1 = pickle.loads(d) self.assertEqual(r, r1) self.assertEqual(r[0], r1[0]) self.assertEqual(r[1], r1[1]) self.assertEqual(r['a'], r1['a']) self.assertEqual(r['b'], r1['b']) self.assertEqual(r._index, r1._index) def testPickleRealDictRow(self): import pickle curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) curs.execute("select 10 as a, 20 as b") r = curs.fetchone() d = pickle.dumps(r) r1 = pickle.loads(d) self.assertEqual(r, r1) self.assertEqual(r['a'], r1['a']) self.assertEqual(r['b'], r1['b']) self.assertEqual(r._column_mapping, r1._column_mapping) class NamedTupleCursorTest(ConnectingTestCase): def setUp(self): ConnectingTestCase.setUp(self) from psycopg2.extras import NamedTupleConnection try: from collections import namedtuple except ImportError: return self.conn = self.connect(connection_factory=NamedTupleConnection) curs = self.conn.cursor() curs.execute("CREATE TEMPORARY TABLE nttest (i int, s text)") curs.execute("INSERT INTO nttest VALUES (1, 'foo')") curs.execute("INSERT INTO nttest VALUES (2, 'bar')") curs.execute("INSERT INTO nttest VALUES (3, 'baz')") self.conn.commit() @skip_if_no_namedtuple def test_cursor_args(self): cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.DictCursor) self.assertEqual(cur.name, 'foo') self.assert_(isinstance(cur, psycopg2.extras.DictCursor)) @skip_if_no_namedtuple def test_fetchone(self): curs = self.conn.cursor() curs.execute("select * from nttest order by 1") t = curs.fetchone() self.assertEqual(t[0], 1) self.assertEqual(t.i, 1) self.assertEqual(t[1], 'foo') self.assertEqual(t.s, 'foo') self.assertEqual(curs.rownumber, 1) self.assertEqual(curs.rowcount, 3) @skip_if_no_namedtuple def test_fetchmany_noarg(self): curs = self.conn.cursor() curs.arraysize = 2 curs.execute("select * from nttest order by 1") res = curs.fetchmany() self.assertEqual(2, len(res)) self.assertEqual(res[0].i, 1) self.assertEqual(res[0].s, 'foo') self.assertEqual(res[1].i, 2) self.assertEqual(res[1].s, 'bar') self.assertEqual(curs.rownumber, 2) self.assertEqual(curs.rowcount, 3) @skip_if_no_namedtuple def test_fetchmany(self): curs = self.conn.cursor() curs.execute("select * from nttest order by 1") res = curs.fetchmany(2) self.assertEqual(2, len(res)) self.assertEqual(res[0].i, 1) self.assertEqual(res[0].s, 'foo') self.assertEqual(res[1].i, 2) self.assertEqual(res[1].s, 'bar') self.assertEqual(curs.rownumber, 2) self.assertEqual(curs.rowcount, 3) @skip_if_no_namedtuple def test_fetchall(self): curs = self.conn.cursor() curs.execute("select * from nttest order by 1") res = curs.fetchall() self.assertEqual(3, len(res)) self.assertEqual(res[0].i, 1) self.assertEqual(res[0].s, 'foo') self.assertEqual(res[1].i, 2) self.assertEqual(res[1].s, 'bar') self.assertEqual(res[2].i, 3) self.assertEqual(res[2].s, 'baz') self.assertEqual(curs.rownumber, 3) self.assertEqual(curs.rowcount, 3) @skip_if_no_namedtuple def test_executemany(self): curs = self.conn.cursor() curs.executemany("delete from nttest where i = %s", [(1,), (2,)]) curs.execute("select * from nttest order by 1") res = curs.fetchall() self.assertEqual(1, len(res)) self.assertEqual(res[0].i, 3) self.assertEqual(res[0].s, 'baz') @skip_if_no_namedtuple def test_iter(self): curs = self.conn.cursor() curs.execute("select * from nttest order by 1") i = iter(curs) self.assertEqual(curs.rownumber, 0) t = i.next() self.assertEqual(t.i, 1) self.assertEqual(t.s, 'foo') self.assertEqual(curs.rownumber, 1) self.assertEqual(curs.rowcount, 3) t = i.next() 
self.assertEqual(t.i, 2) self.assertEqual(t.s, 'bar') self.assertEqual(curs.rownumber, 2) self.assertEqual(curs.rowcount, 3) t = i.next() self.assertEqual(t.i, 3) self.assertEqual(t.s, 'baz') self.assertRaises(StopIteration, i.next) self.assertEqual(curs.rownumber, 3) self.assertEqual(curs.rowcount, 3) def test_error_message(self): try: from collections import namedtuple except ImportError: # an import error somewhere from psycopg2.extras import NamedTupleConnection try: self.conn = self.connect( connection_factory=NamedTupleConnection) curs = self.conn.cursor() curs.execute("select 1") curs.fetchone() except ImportError: pass else: self.fail("expecting ImportError") else: return self.skipTest("namedtuple available") @skip_if_no_namedtuple def test_record_updated(self): curs = self.conn.cursor() curs.execute("select 1 as foo;") r = curs.fetchone() self.assertEqual(r.foo, 1) curs.execute("select 2 as bar;") r = curs.fetchone() self.assertEqual(r.bar, 2) self.assertRaises(AttributeError, getattr, r, 'foo') @skip_if_no_namedtuple def test_no_result_no_surprise(self): curs = self.conn.cursor() curs.execute("update nttest set s = s") self.assertRaises(psycopg2.ProgrammingError, curs.fetchone) curs.execute("update nttest set s = s") self.assertRaises(psycopg2.ProgrammingError, curs.fetchall) @skip_if_no_namedtuple def test_minimal_generation(self): # Instrument the class to verify it gets called the minimum number of times. from psycopg2.extras import NamedTupleCursor f_orig = NamedTupleCursor._make_nt calls = [0] def f_patched(self_): calls[0] += 1 return f_orig(self_) NamedTupleCursor._make_nt = f_patched try: curs = self.conn.cursor() curs.execute("select * from nttest order by 1") curs.fetchone() curs.fetchone() curs.fetchone() self.assertEqual(1, calls[0]) curs.execute("select * from nttest order by 1") curs.fetchone() curs.fetchall() self.assertEqual(2, calls[0]) curs.execute("select * from nttest order by 1") curs.fetchone() curs.fetchmany(1) self.assertEqual(3, calls[0]) finally: NamedTupleCursor._make_nt = f_orig @skip_if_no_namedtuple @skip_before_postgres(8, 0) def test_named(self): curs = self.conn.cursor('tmp') curs.execute("""select i from generate_series(0,9) i""") recs = [] recs.extend(curs.fetchmany(5)) recs.append(curs.fetchone()) recs.extend(curs.fetchall()) self.assertEqual(range(10), [t.i for t in recs]) @skip_if_no_namedtuple def test_named_fetchone(self): curs = self.conn.cursor('tmp') curs.execute("""select 42 as i""") t = curs.fetchone() self.assertEqual(t.i, 42) @skip_if_no_namedtuple def test_named_fetchmany(self): curs = self.conn.cursor('tmp') curs.execute("""select 42 as i""") recs = curs.fetchmany(10) self.assertEqual(recs[0].i, 42) @skip_if_no_namedtuple def test_named_fetchall(self): curs = self.conn.cursor('tmp') curs.execute("""select 42 as i""") recs = curs.fetchall() self.assertEqual(recs[0].i, 42) @skip_if_no_namedtuple @skip_before_postgres(8, 2) def test_not_greedy(self): curs = self.conn.cursor('tmp') curs.itersize = 2 curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""") recs = [] for t in curs: time.sleep(0.01) recs.append(t) # check that the dataset was not fetched in a single gulp self.assert_(recs[1].ts - recs[0].ts < timedelta(seconds=0.005)) self.assert_(recs[2].ts - recs[1].ts > timedelta(seconds=0.0099)) @skip_if_no_namedtuple @skip_before_postgres(8, 0) def test_named_rownumber(self): curs = self.conn.cursor('tmp') # Only checking for dataset < itersize: # see CursorTests.test_iter_named_cursor_rownumber curs.itersize = 4 
curs.execute("""select * from generate_series(1,3)""") for i, t in enumerate(curs): self.assertEqual(i + 1, curs.rownumber) def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == "__main__": unittest.main()
mit
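The behaviour exercised by the DictCursor tests above can be summarised in a short sketch; the connection string is a placeholder, everything else is the psycopg2.extras API used in the tests:

import psycopg2
import psycopg2.extras

conn = psycopg2.connect('dbname=test')

# DictCursor rows support both index and key access.
curs = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute('SELECT 10 AS a, 20 AS b')
row = curs.fetchone()
assert row[0] == row['a'] == 10

# NamedTupleCursor rows expose columns as attributes.
curs = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
curs.execute('SELECT 10 AS a, 20 AS b')
rec = curs.fetchone()
assert rec.a == 10 and rec.b == 20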
MaxStrange/ArtieInfant
scripts/plotaudio/plotaudio.py
1
2598
""" This is code that I find I use a LOT while debugging or analyzing. """ import audiosegment import math import matplotlib.pyplot as plt import numpy as np import os import sys ################################################# #### These are the parameters I have been using # ################################################# # ---- for long spectrograms ------ sample_rate_hz = 16000.0 # 16kHz sample rate bytewidth = 2 # 16-bit samples nchannels = 1 # mono duration_s = 0.5 # Duration of each complete spectrogram window_length_s = 0.03 # How long each FFT is overlap = 0.2 # How much each FFT overlaps with each other one # ---- for short spectrograms ------ #sample_rate_hz = 8000.0 # 8kHz sample rate #bytewidth = 2 # 16-bit samples #nchannels = 1 # mono #duration_s = 0.3 # Duration of each complete spectrogram #window_length_s = 0.02 # How long each FFT is #overlap = 0.2 # How much each FFT overlaps with each other one ################################################# if __name__ == "__main__": if len(sys.argv) != 2: print("Need a path to a WAV file.") exit(1) seg = audiosegment.from_file(sys.argv[1]) print(seg) print(" -> RMS:", seg.rms) print(" -> SPL:", seg.spl) print(" -> Length (s):", seg.duration_seconds) print(" -> NChannels:", seg.channels) print(" -> Frequency (Hz):", seg.frame_rate) print(" -> Bytes per sample:", seg.sample_width) print(" -> Human audible?", seg.human_audible()) name = os.path.basename(sys.argv[1]) name, _ext = os.path.splitext(name) plt.title("Raw Values") arr = seg.to_numpy_array() times = np.linspace(0, len(arr) / seg.frame_rate, num=len(arr)) plt.plot(times, seg.to_numpy_array()) plt.xlabel("Time (s)") plt.ylabel("PCM") plt.savefig("{}-waveform.png".format(name)) plt.show() plt.title("Histogram") hist_bins, hist_vals = seg.fft() hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals) plt.plot(hist_bins/1000, hist_vals_real_normed) plt.xlabel("kHz") plt.ylabel("dB") plt.savefig("{}-histogram.png".format(name)) plt.show() plt.title("Spectrogram") fs, ts, amps = seg.spectrogram(0, duration_s, window_length_s=window_length_s, overlap=overlap, window=('tukey', 0.5)) #amps = 10.0 * np.log10(amps) plt.pcolormesh(ts, fs, amps) plt.xlabel("Time (s)") plt.ylabel("Hz") plt.savefig("{}-spectrogram.png".format(name)) plt.show()
mit
glaubitz/fs-uae-debian
arcade/launcher/ui/floppiesgroup.py
2
4358
import fsui from launcher.cd_manager import CDManager from launcher.floppy_manager import FloppyManager from launcher.i18n import gettext from launcher.option import Option from launcher.ui.behaviors.platformbehavior import ( AMIGA_PLATFORMS, CDEnableBehavior, FloppyEnableBehavior, ) from launcher.ui.floppyselector import FloppySelector from launcher.ui.options import ConfigWidgetFactory class FloppiesGroup(fsui.Group): FLOPPY_MODE = FloppySelector.FLOPPY_MODE CD_MODE = FloppySelector.CD_MODE TAPE_MODE = FloppySelector.TAPE_MODE CARTRIDGE_MODE = FloppySelector.CARTRIDGE_MODE def __init__(self, parent, drives=2, cd_mode=False, removable_media=False): fsui.Group.__init__(self, parent) self.layout = fsui.VerticalLayout() self.cd_mode = cd_mode self.num_drives = drives hori_layout = fsui.HorizontalLayout() self.layout.add(hori_layout, fill=True) self.mode = self.FLOPPY_MODE if cd_mode: self.mode = self.CD_MODE if self.mode == self.CD_MODE: title = gettext("CD-ROM Drive") drive_count_option = Option.CDROM_DRIVE_COUNT behavior_class = CDEnableBehavior elif self.mode == self.TAPE_MODE: title = gettext("Tape Drive") drive_count_option = None behavior_class = None elif self.mode == self.CARTRIDGE_MODE: title = gettext("Cartridge") drive_count_option = None behavior_class = None else: title = gettext("Floppy Drives") drive_count_option = Option.FLOPPY_DRIVE_COUNT behavior_class = FloppyEnableBehavior if removable_media: # Removable media group will change type dynamically behavior_class = None self.label = fsui.HeadingLabel(self, title) hori_layout.add(self.label, margin=10) hori_layout.add_spacer(0, expand=True) if drive_count_option and not removable_media: # FIXME: Drive count option does not work on the main page when # changing to CD mode. Workaround for now is to not include it. hori_layout.add( ConfigWidgetFactory().create( self, drive_count_option, text=gettext("Drive Count"), platforms=AMIGA_PLATFORMS, ), fill=True, margin_right=20, ) self.multi_select_button = fsui.Button( self, gettext("Multi-Select...") ) if self.cd_mode: self.multi_select_button.set_tooltip( gettext("Add Multiple CD-ROMs at Once") ) else: self.multi_select_button.set_tooltip( gettext("Add Multiple Floppies at Once") ) if behavior_class: behavior_class(self.multi_select_button) self.multi_select_button.activated.connect(self.on_multi_select_button) hori_layout.add(self.multi_select_button, margin_right=10) self.layout.add_spacer(0) self.selectors = [] for i in range(drives): selector = FloppySelector(parent, i, show_path=not removable_media) if behavior_class: behavior_class(selector) selector.set_mode(self.mode) self.selectors.append(selector) self.layout.add(selector, fill=True, margin=10, margin_bottom=0) def on_multi_select_button(self): if self.cd_mode: CDManager.multi_select(self.get_window()) else: FloppyManager.multi_select(self.get_window()) def update_heading_label(self): if self.mode == self.CD_MODE: if self.num_drives > 1: self.label.set_text(gettext("CD-ROM Drives")) else: self.label.set_text(gettext("CD-ROM Drive")) elif self.mode == self.TAPE_MODE: self.label.set_text(gettext("Tape Drive")) elif self.mode == self.CARTRIDGE_MODE: self.label.set_text(gettext("Cartridge")) else: self.label.set_text(gettext("Floppy Drives")) # Need to update the layout to account for label widget size change. self.layout.update()
gpl-2.0
kalvdans/scipy
scipy/io/matlab/tests/test_mio.py
14
42133
# -*- coding: latin-1 -*- ''' Nose test generators Need function load / save / roundtrip tests ''' from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin, dirname from glob import glob from io import BytesIO from tempfile import mkdtemp from scipy._lib.six import u, text_type, string_types import warnings import shutil import gzip from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_equal, assert_raises, run_module_suite, assert_) import numpy as np from numpy import array import scipy.sparse as SP import scipy.io.matlab.byteordercodes as boc from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat) from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader, MatlabFunction, varmats_from_mat, to_writeable, EmptyStructMarker) from scipy.io.matlab import mio5_params as mio5p test_data_path = pjoin(dirname(__file__), 'data') def mlarr(*args, **kwargs): """Convenience function to return matlab-compatible 2D array.""" arr = np.array(*args, **kwargs) arr.shape = matdims(arr) return arr # Define cases to test theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) case_table4 = [ {'name': 'double', 'classes': {'testdouble': 'double'}, 'expected': {'testdouble': theta} }] case_table4.append( {'name': 'string', 'classes': {'teststring': 'char'}, 'expected': {'teststring': array([u('"Do nine men interpret?" "Nine men," I nod.')])} }) case_table4.append( {'name': 'complex', 'classes': {'testcomplex': 'double'}, 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} }) A = np.zeros((3,5)) A[0] = list(range(1,6)) A[:,0] = list(range(1,4)) case_table4.append( {'name': 'matrix', 'classes': {'testmatrix': 'double'}, 'expected': {'testmatrix': A}, }) case_table4.append( {'name': 'sparse', 'classes': {'testsparse': 'sparse'}, 'expected': {'testsparse': SP.coo_matrix(A)}, }) B = A.astype(complex) B[0,0] += 1j case_table4.append( {'name': 'sparsecomplex', 'classes': {'testsparsecomplex': 'sparse'}, 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, }) case_table4.append( {'name': 'multi', 'classes': {'theta': 'double', 'a': 'double'}, 'expected': {'theta': theta, 'a': A}, }) case_table4.append( {'name': 'minus', 'classes': {'testminus': 'double'}, 'expected': {'testminus': mlarr(-1)}, }) case_table4.append( {'name': 'onechar', 'classes': {'testonechar': 'char'}, 'expected': {'testonechar': array([u('r')])}, }) # Cell arrays stored as object arrays CA = mlarr(( # tuple for object array creation [], mlarr([1]), mlarr([[1,2]]), mlarr([[1,2,3]])), dtype=object).reshape(1,-1) CA[0,0] = array( [u('This cell contains this string and 3 arrays of increasing length')]) case_table5 = [ {'name': 'cell', 'classes': {'testcell': 'cell'}, 'expected': {'testcell': CA}}] CAE = mlarr(( # tuple for object array creation mlarr(1), mlarr(2), mlarr([]), mlarr([]), mlarr(3)), dtype=object).reshape(1,-1) objarr = np.empty((1,1),dtype=object) objarr[0,0] = mlarr(1) case_table5.append( {'name': 'scalarcell', 'classes': {'testscalarcell': 'cell'}, 'expected': {'testscalarcell': objarr} }) case_table5.append( {'name': 'emptycell', 'classes': {'testemptycell': 'cell'}, 'expected': {'testemptycell': CAE}}) case_table5.append( {'name': 'stringarray', 'classes': {'teststringarray': 'char'}, 'expected': {'teststringarray': array( [u('one '), u('two '), u('three')])}, }) case_table5.append( {'name': '3dmatrix', 'classes': {'test3dmatrix': 
'double'}, 'expected': { 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} }) st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] st1 = np.zeros((1,1), dtype) st1['stringfield'][0,0] = array([u('Rats live on no evil star.')]) st1['doublefield'][0,0] = st_sub_arr st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) case_table5.append( {'name': 'struct', 'classes': {'teststruct': 'struct'}, 'expected': {'teststruct': st1} }) CN = np.zeros((1,2), dtype=object) CN[0,0] = mlarr(1) CN[0,1] = np.zeros((1,3), dtype=object) CN[0,1][0,0] = mlarr(2, dtype=np.uint8) CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) CN[0,1][0,2] = np.zeros((1,2), dtype=object) CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) case_table5.append( {'name': 'cellnest', 'classes': {'testcellnest': 'cell'}, 'expected': {'testcellnest': CN}, }) st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) st2[0,0]['one'] = mlarr(1) st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) st2[0,0]['two'][0,0]['three'] = array([u('number 3')]) case_table5.append( {'name': 'structnest', 'classes': {'teststructnest': 'struct'}, 'expected': {'teststructnest': st2} }) a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) a[0,0]['one'] = mlarr(1) a[0,0]['two'] = mlarr(2) a[0,1]['one'] = array([u('number 1')]) a[0,1]['two'] = array([u('number 2')]) case_table5.append( {'name': 'structarr', 'classes': {'teststructarr': 'struct'}, 'expected': {'teststructarr': a} }) ODT = np.dtype([(n, object) for n in ['expr', 'inputExpr', 'args', 'isEmpty', 'numArgs', 'version']]) MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') m0 = MO[0,0] m0['expr'] = array([u('x')]) m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')]) m0['args'] = array([u('x')]) m0['isEmpty'] = mlarr(0) m0['numArgs'] = mlarr(1) m0['version'] = mlarr(1) case_table5.append( {'name': 'object', 'classes': {'testobject': 'object'}, 'expected': {'testobject': MO} }) fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb') u_str = fp_u_str.read().decode('utf-8') fp_u_str.close() case_table5.append( {'name': 'unicode', 'classes': {'testunicode': 'char'}, 'expected': {'testunicode': array([u_str])} }) case_table5.append( {'name': 'sparse', 'classes': {'testsparse': 'sparse'}, 'expected': {'testsparse': SP.coo_matrix(A)}, }) case_table5.append( {'name': 'sparsecomplex', 'classes': {'testsparsecomplex': 'sparse'}, 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, }) case_table5.append( {'name': 'bool', 'classes': {'testbools': 'logical'}, 'expected': {'testbools': array([[True], [False]])}, }) case_table5_rt = case_table5[:] # Inline functions can't be concatenated in matlab, so RT only case_table5_rt.append( {'name': 'objectarray', 'classes': {'testobjectarray': 'object'}, 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) def types_compatible(var1, var2): """Check if types are same or compatible. 0-D numpy scalars are compatible with bare python scalars. 
""" type1 = type(var1) type2 = type(var2) if type1 is type2: return True if type1 is np.ndarray and var1.shape == (): return type(var1.item()) is type2 if type2 is np.ndarray and var2.shape == (): return type(var2.item()) is type1 return False def _check_level(label, expected, actual): """ Check one level of a potentially nested array """ if SP.issparse(expected): # allow different types of sparse matrices assert_(SP.issparse(actual)) assert_array_almost_equal(actual.todense(), expected.todense(), err_msg=label, decimal=5) return # Check types are as expected assert_(types_compatible(expected, actual), "Expected type %s, got %s at %s" % (type(expected), type(actual), label)) # A field in a record array may not be an ndarray # A scalar from a record array will be type np.void if not isinstance(expected, (np.void, np.ndarray, MatlabObject)): assert_equal(expected, actual) return # This is an ndarray-like thing assert_(expected.shape == actual.shape, msg='Expected shape %s, got %s at %s' % (expected.shape, actual.shape, label)) ex_dtype = expected.dtype if ex_dtype.hasobject: # array of objects if isinstance(expected, MatlabObject): assert_equal(expected.classname, actual.classname) for i, ev in enumerate(expected): level_label = "%s, [%d], " % (label, i) _check_level(level_label, ev, actual[i]) return if ex_dtype.fields: # probably recarray for fn in ex_dtype.fields: level_label = "%s, field %s, " % (label, fn) _check_level(level_label, expected[fn], actual[fn]) return if ex_dtype.type in (text_type, # string or bool np.unicode_, np.bool_): assert_equal(actual, expected, err_msg=label) return # Something numeric assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) def _load_check_case(name, files, case): for file_name in files: matdict = loadmat(file_name, struct_as_record=True) label = "test %s; file %s" % (name, file_name) for k, expected in case.items(): k_label = "%s, variable %s" % (label, k) assert_(k in matdict, "Missing key at %s" % k_label) _check_level(k_label, expected, matdict[k]) def _whos_check_case(name, files, case, classes): for file_name in files: label = "test %s; file %s" % (name, file_name) whos = whosmat(file_name) expected_whos = [] for k, expected in case.items(): expected_whos.append((k, expected.shape, classes[k])) whos.sort() expected_whos.sort() assert_equal(whos, expected_whos, "%s: %r != %r" % (label, whos, expected_whos) ) # Round trip tests def _rt_check_case(name, expected, format): mat_stream = BytesIO() savemat(mat_stream, expected, format=format) mat_stream.seek(0) _load_check_case(name, [mat_stream], expected) # generator for load tests def test_load(): for case in case_table4 + case_table5: name = case['name'] expected = case['expected'] filt = pjoin(test_data_path, 'test%s_*.mat' % name) files = glob(filt) assert_(len(files) > 0, "No files for test %s using filter %s" % (name, filt)) yield _load_check_case, name, files, expected # generator for whos tests def test_whos(): for case in case_table4 + case_table5: name = case['name'] expected = case['expected'] classes = case['classes'] filt = pjoin(test_data_path, 'test%s_*.mat' % name) files = glob(filt) assert_(len(files) > 0, "No files for test %s using filter %s" % (name, filt)) yield _whos_check_case, name, files, expected, classes # generator for round trip tests def test_round_trip(): for case in case_table4 + case_table5_rt: case_table4_names = [case['name'] for case in case_table4] name = case['name'] + '_round_trip' expected = case['expected'] for format in (['4', '5'] if 
case['name'] in case_table4_names else ['5']): yield _rt_check_case, name, expected, format def test_gzip_simple(): xdense = np.zeros((20,20)) xdense[2,3] = 2.3 xdense[4,5] = 4.5 x = SP.csc_matrix(xdense) name = 'gzip_test' expected = {'x':x} format = '4' tmpdir = mkdtemp() try: fname = pjoin(tmpdir,name) mat_stream = gzip.open(fname,mode='wb') savemat(mat_stream, expected, format=format) mat_stream.close() mat_stream = gzip.open(fname,mode='rb') actual = loadmat(mat_stream, struct_as_record=True) mat_stream.close() finally: shutil.rmtree(tmpdir) assert_array_almost_equal(actual['x'].todense(), expected['x'].todense(), err_msg=repr(actual)) def test_multiple_open(): # Ticket #1039, on Windows: check that files are not left open tmpdir = mkdtemp() try: x = dict(x=np.zeros((2, 2))) fname = pjoin(tmpdir, "a.mat") # Check that file is not left open savemat(fname, x) os.unlink(fname) savemat(fname, x) loadmat(fname) os.unlink(fname) # Check that stream is left open f = open(fname, 'wb') savemat(f, x) f.seek(0) f.close() f = open(fname, 'rb') loadmat(f) f.seek(0) f.close() finally: shutil.rmtree(tmpdir) def test_mat73(): # Check any hdf5 files raise an error filenames = glob( pjoin(test_data_path, 'testhdf5*.mat')) assert_(len(filenames) > 0) for filename in filenames: fp = open(filename, 'rb') assert_raises(NotImplementedError, loadmat, fp, struct_as_record=True) fp.close() def test_warnings(): # This test is an echo of the previous behavior, which was to raise a # warning if the user triggered a search for mat files on the Python system # path. We can remove the test in the next version after upcoming (0.13) fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') with warnings.catch_warnings(): warnings.simplefilter('error') # This should not generate a warning mres = loadmat(fname, struct_as_record=True) # This neither mres = loadmat(fname, struct_as_record=False) def test_regression_653(): # Saving a dictionary with only invalid keys used to raise an error. Now we # save this as an empty struct in matlab space. 
sio = BytesIO() savemat(sio, {'d':{1:2}}, format='5') back = loadmat(sio)['d'] # Check we got an empty struct equivalent assert_equal(back.shape, (1,1)) assert_equal(back.dtype, np.dtype(object)) assert_(back[0,0] is None) def test_structname_len(): # Test limit for length of field names in structs lim = 31 fldname = 'a' * lim st1 = np.zeros((1,1), dtype=[(fldname, object)]) savemat(BytesIO(), {'longstruct': st1}, format='5') fldname = 'a' * (lim+1) st1 = np.zeros((1,1), dtype=[(fldname, object)]) assert_raises(ValueError, savemat, BytesIO(), {'longstruct': st1}, format='5') def test_4_and_long_field_names_incompatible(): # Long field names option not supported in 4 my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) assert_raises(ValueError, savemat, BytesIO(), {'my_struct':my_struct}, format='4', long_field_names=True) def test_long_field_names(): # Test limit for length of field names in structs lim = 63 fldname = 'a' * lim st1 = np.zeros((1,1), dtype=[(fldname, object)]) savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) fldname = 'a' * (lim+1) st1 = np.zeros((1,1), dtype=[(fldname, object)]) assert_raises(ValueError, savemat, BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) def test_long_field_names_in_struct(): # Regression test - long_field_names was erased if you passed a struct # within a struct lim = 63 fldname = 'a' * lim cell = np.ndarray((1,2),dtype=object) st1 = np.zeros((1,1), dtype=[(fldname, object)]) cell[0,0] = st1 cell[0,1] = st1 savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) # # Check to make sure it fails with long field names off # assert_raises(ValueError, savemat, BytesIO(), {'longstruct': cell}, format='5', long_field_names=False) def test_cell_with_one_thing_in_it(): # Regression test - make a cell array that's 1 x 2 and put two # strings in it. It works. Make a cell array that's 1 x 1 and put # a string in it. It should work but, in the old days, it didn't. 
cells = np.ndarray((1,2),dtype=object) cells[0,0] = 'Hello' cells[0,1] = 'World' savemat(BytesIO(), {'x': cells}, format='5') cells = np.ndarray((1,1),dtype=object) cells[0,0] = 'Hello, world' savemat(BytesIO(), {'x': cells}, format='5') def test_writer_properties(): # Tests getting, setting of properties of matrix writer mfw = MatFile5Writer(BytesIO()) yield assert_equal, mfw.global_vars, [] mfw.global_vars = ['avar'] yield assert_equal, mfw.global_vars, ['avar'] yield assert_equal, mfw.unicode_strings, False mfw.unicode_strings = True yield assert_equal, mfw.unicode_strings, True yield assert_equal, mfw.long_field_names, False mfw.long_field_names = True yield assert_equal, mfw.long_field_names, True def test_use_small_element(): # Test whether we're using small data element or not sio = BytesIO() wtr = MatFile5Writer(sio) # First check size for no sde for name arr = np.zeros(10) wtr.put_variables({'aaaaa': arr}) w_sz = len(sio.getvalue()) # Check small name results in largish difference in size sio.truncate(0) sio.seek(0) wtr.put_variables({'aaaa': arr}) yield assert_, w_sz - len(sio.getvalue()) > 4 # Whereas increasing name size makes less difference sio.truncate(0) sio.seek(0) wtr.put_variables({'aaaaaa': arr}) yield assert_, len(sio.getvalue()) - w_sz < 4 def test_save_dict(): # Test that dict can be saved (as recarray), loaded as matstruct dict_types = ((dict, False),) try: from collections import OrderedDict except ImportError: pass else: dict_types += ((OrderedDict, True),) ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)]) ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)]) for dict_type, is_ordered in dict_types: # Initialize with tuples to keep order for OrderedDict d = dict_type([('a', 1), ('b', 2)]) stream = BytesIO() savemat(stream, {'dict': d}) stream.seek(0) vals = loadmat(stream)['dict'] assert_equal(set(vals.dtype.names), set(['a', 'b'])) if is_ordered: # Input was ordered, output in ab order assert_array_equal(vals, ab_exp) else: # Not ordered input, either order output if vals.dtype.names[0] == 'a': assert_array_equal(vals, ab_exp) else: assert_array_equal(vals, ba_exp) def test_1d_shape(): # New 5 behavior is 1D -> row vector arr = np.arange(5) for format in ('4', '5'): # Column is the default stream = BytesIO() savemat(stream, {'oned': arr}, format=format) vals = loadmat(stream) assert_equal(vals['oned'].shape, (1, 5)) # can be explicitly 'column' for oned_as stream = BytesIO() savemat(stream, {'oned':arr}, format=format, oned_as='column') vals = loadmat(stream) assert_equal(vals['oned'].shape, (5,1)) # but different from 'row' stream = BytesIO() savemat(stream, {'oned':arr}, format=format, oned_as='row') vals = loadmat(stream) assert_equal(vals['oned'].shape, (1,5)) def test_compression(): arr = np.zeros(100).reshape((5,20)) arr[2,10] = 1 stream = BytesIO() savemat(stream, {'arr':arr}) raw_len = len(stream.getvalue()) vals = loadmat(stream) yield assert_array_equal, vals['arr'], arr stream = BytesIO() savemat(stream, {'arr':arr}, do_compression=True) compressed_len = len(stream.getvalue()) vals = loadmat(stream) yield assert_array_equal, vals['arr'], arr yield assert_, raw_len > compressed_len # Concatenate, test later arr2 = arr.copy() arr2[0,0] = 1 stream = BytesIO() savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False) vals = loadmat(stream) yield assert_array_equal, vals['arr2'], arr2 stream = BytesIO() savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True) vals = loadmat(stream) yield assert_array_equal, 
vals['arr2'], arr2 def test_single_object(): stream = BytesIO() savemat(stream, {'A':np.array(1, dtype=object)}) def test_skip_variable(): # Test skipping over the first of two variables in a MAT file # using mat_reader_factory and put_variables to read them in. # # This is a regression test of a problem that's caused by # using the compressed file reader seek instead of the raw file # I/O seek when skipping over a compressed chunk. # # The problem arises when the chunk is large: this file has # a 256x256 array of random (uncompressible) doubles. # filename = pjoin(test_data_path,'test_skip_variable.mat') # # Prove that it loads with loadmat # d = loadmat(filename, struct_as_record=True) yield assert_, 'first' in d yield assert_, 'second' in d # # Make the factory # factory = mat_reader_factory(filename, struct_as_record=True) # # This is where the factory breaks with an error in MatMatrixGetter.to_next # d = factory.get_variables('second') yield assert_, 'second' in d factory.mat_stream.close() def test_empty_struct(): # ticket 885 filename = pjoin(test_data_path,'test_empty_struct.mat') # before ticket fix, this would crash with ValueError, empty data # type d = loadmat(filename, struct_as_record=True) a = d['a'] assert_equal(a.shape, (1,1)) assert_equal(a.dtype, np.dtype(object)) assert_(a[0,0] is None) stream = BytesIO() arr = np.array((), dtype='U') # before ticket fix, this used to give data type not understood savemat(stream, {'arr':arr}) d = loadmat(stream) a2 = d['arr'] assert_array_equal(a2, arr) def test_save_empty_dict(): # saving empty dict also gives empty struct stream = BytesIO() savemat(stream, {'arr': {}}) d = loadmat(stream) a = d['arr'] assert_equal(a.shape, (1,1)) assert_equal(a.dtype, np.dtype(object)) assert_(a[0,0] is None) def assert_any_equal(output, alternatives): """ Assert `output` is equal to at least one element in `alternatives` """ one_equal = False for expected in alternatives: if np.all(output == expected): one_equal = True break assert_(one_equal) def test_to_writeable(): # Test to_writeable function res = to_writeable(np.array([1])) # pass through ndarrays assert_equal(res.shape, (1,)) assert_array_equal(res, 1) # Dict fields can be written in any order expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')]) expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')]) alternatives = (expected1, expected2) assert_any_equal(to_writeable({'a':1,'b':2}), alternatives) # Fields with underscores discarded assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives) # Not-string fields discarded assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives) # String fields that are valid Python identifiers discarded assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives) # Object with field names is equivalent class klass(object): pass c = klass c.a = 1 c.b = 2 assert_any_equal(to_writeable(c), alternatives) # empty list and tuple go to empty array res = to_writeable([]) assert_equal(res.shape, (0,)) assert_equal(res.dtype.type, np.float64) res = to_writeable(()) assert_equal(res.shape, (0,)) assert_equal(res.dtype.type, np.float64) # None -> None assert_(to_writeable(None) is None) # String to strings assert_equal(to_writeable('a string').dtype.type, np.str_) # Scalars to numpy to numpy scalars res = to_writeable(1) assert_equal(res.shape, ()) assert_equal(res.dtype.type, np.array(1).dtype.type) assert_array_equal(res, 1) # Empty dict returns EmptyStructMarker assert_(to_writeable({}) is EmptyStructMarker) # Object does 
not have (even empty) __dict__ assert_(to_writeable(object()) is None) # Custom object does have empty __dict__, returns EmptyStructMarker class C(object): pass assert_(to_writeable(c()) is EmptyStructMarker) # dict keys with legal characters are convertible res = to_writeable({'a': 1})['a'] assert_equal(res.shape, (1,)) assert_equal(res.dtype.type, np.object_) # Only fields with illegal characters, falls back to EmptyStruct assert_(to_writeable({'1':1}) is EmptyStructMarker) assert_(to_writeable({'_a':1}) is EmptyStructMarker) # Unless there are valid fields, in which case structured array assert_equal(to_writeable({'1':1, 'f': 2}), np.array([(2,)], dtype=[('f', '|O8')])) def test_recarray(): # check roundtrip of structured array dt = [('f1', 'f8'), ('f2', 'S10')] arr = np.zeros((2,), dtype=dt) arr[0]['f1'] = 0.5 arr[0]['f2'] = 'python' arr[1]['f1'] = 99 arr[1]['f2'] = 'not perl' stream = BytesIO() savemat(stream, {'arr': arr}) d = loadmat(stream, struct_as_record=False) a20 = d['arr'][0,0] yield assert_equal, a20.f1, 0.5 yield assert_equal, a20.f2, 'python' d = loadmat(stream, struct_as_record=True) a20 = d['arr'][0,0] yield assert_equal, a20['f1'], 0.5 yield assert_equal, a20['f2'], 'python' # structs always come back as object types yield assert_equal, a20.dtype, np.dtype([('f1', 'O'), ('f2', 'O')]) a21 = d['arr'].flat[1] yield assert_equal, a21['f1'], 99 yield assert_equal, a21['f2'], 'not perl' def test_save_object(): class C(object): pass c = C() c.field1 = 1 c.field2 = 'a string' stream = BytesIO() savemat(stream, {'c': c}) d = loadmat(stream, struct_as_record=False) c2 = d['c'][0,0] assert_equal(c2.field1, 1) assert_equal(c2.field2, 'a string') d = loadmat(stream, struct_as_record=True) c2 = d['c'][0,0] assert_equal(c2['field1'], 1) assert_equal(c2['field2'], 'a string') def test_read_opts(): # tests if read is seeing option sets, at initialization and after # initialization arr = np.arange(6).reshape(1,6) stream = BytesIO() savemat(stream, {'a': arr}) rdr = MatFile5Reader(stream) back_dict = rdr.get_variables() rarr = back_dict['a'] assert_array_equal(rarr, arr) rdr = MatFile5Reader(stream, squeeze_me=True) assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,))) rdr.squeeze_me = False assert_array_equal(rarr, arr) rdr = MatFile5Reader(stream, byte_order=boc.native_code) assert_array_equal(rdr.get_variables()['a'], arr) # inverted byte code leads to error on read because of swapped # header etc rdr = MatFile5Reader(stream, byte_order=boc.swapped_code) assert_raises(Exception, rdr.get_variables) rdr.byte_order = boc.native_code assert_array_equal(rdr.get_variables()['a'], arr) arr = np.array(['a string']) stream.truncate(0) stream.seek(0) savemat(stream, {'a': arr}) rdr = MatFile5Reader(stream) assert_array_equal(rdr.get_variables()['a'], arr) rdr = MatFile5Reader(stream, chars_as_strings=False) carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1')) assert_array_equal(rdr.get_variables()['a'], carr) rdr.chars_as_strings = True assert_array_equal(rdr.get_variables()['a'], arr) def test_empty_string(): # make sure reading empty string does not raise error estring_fname = pjoin(test_data_path, 'single_empty_string.mat') fp = open(estring_fname, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_array_equal(d['a'], np.array([], dtype='U1')) # empty string round trip. Matlab cannot distiguish # between a string array that is empty, and a string array # containing a single empty string, because it stores strings as # arrays of char. 
There is no way of having an array of char that # is not empty, but contains an empty string. stream = BytesIO() savemat(stream, {'a': np.array([''])}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['a'], np.array([], dtype='U1')) stream.truncate(0) stream.seek(0) savemat(stream, {'a': np.array([], dtype='U1')}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['a'], np.array([], dtype='U1')) stream.close() def test_corrupted_data(): import zlib for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'), (zlib.error, 'corrupted_zlib_checksum.mat')]: with open(pjoin(test_data_path, fname), 'rb') as fp: rdr = MatFile5Reader(fp) assert_raises(exc, rdr.get_variables) def test_corrupted_data_check_can_be_disabled(): with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp: rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False) rdr.get_variables() def test_read_both_endian(): # make sure big- and little- endian data is read correctly for fname in ('big_endian.mat', 'little_endian.mat'): fp = open(pjoin(test_data_path, fname), 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_array_equal(d['strings'], np.array([['hello'], ['world']], dtype=object)) assert_array_equal(d['floats'], np.array([[2., 3.], [3., 4.]], dtype=np.float32)) def test_write_opposite_endian(): # We don't support writing opposite endian .mat files, but we need to behave # correctly if the user supplies an other-endian numpy array to write out float_arr = np.array([[2., 3.], [3., 4.]]) int_arr = np.arange(6).reshape((2, 3)) uni_arr = np.array(['hello', 'world'], dtype='U') stream = BytesIO() savemat(stream, {'floats': float_arr.byteswap().newbyteorder(), 'ints': int_arr.byteswap().newbyteorder(), 'uni_arr': uni_arr.byteswap().newbyteorder()}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['floats'], float_arr) assert_array_equal(d['ints'], int_arr) assert_array_equal(d['uni_arr'], uni_arr) stream.close() def test_logical_array(): # The roundtrip test doesn't verify that we load the data up with the # correct (bool) dtype with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj: rdr = MatFile5Reader(fobj, mat_dtype=True) d = rdr.get_variables() x = np.array([[True], [False]], dtype=np.bool_) assert_array_equal(d['testbools'], x) assert_equal(d['testbools'].dtype, x.dtype) def test_logical_out_type(): # Confirm that bool type written as uint8, uint8 class # See gh-4022 stream = BytesIO() barr = np.array([False, True, False]) savemat(stream, {'barray': barr}) stream.seek(0) reader = MatFile5Reader(stream) reader.initialize_read() reader.read_file_header() hdr, _ = reader.read_var_header() assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS) assert_equal(hdr.is_logical, True) var = reader.read_var_array(hdr, False) assert_equal(var.dtype.type, np.uint8) def test_mat4_3d(): # test behavior when writing 3D arrays to matlab 4 files stream = BytesIO() arr = np.arange(24).reshape((2,3,4)) assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4') def test_func_read(): func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') fp = open(func_eg, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_(isinstance(d['testfunc'], MatlabFunction)) stream = BytesIO() wtr = MatFile5Writer(stream) assert_raises(MatWriteError, wtr.put_variables, d) def test_mat_dtype(): double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') fp = open(double_eg, 'rb') rdr = MatFile5Reader(fp, 
mat_dtype=False) d = rdr.get_variables() fp.close() yield assert_equal, d['testmatrix'].dtype.kind, 'u' fp = open(double_eg, 'rb') rdr = MatFile5Reader(fp, mat_dtype=True) d = rdr.get_variables() fp.close() yield assert_equal, d['testmatrix'].dtype.kind, 'f' def test_sparse_in_struct(): # reproduces bug found by DC where Cython code was insisting on # ndarray return type, but getting sparse matrix st = {'sparsefield': SP.coo_matrix(np.eye(4))} stream = BytesIO() savemat(stream, {'a':st}) d = loadmat(stream, struct_as_record=True) yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4) def test_mat_struct_squeeze(): stream = BytesIO() in_d = {'st':{'one':1, 'two':2}} savemat(stream, in_d) # no error without squeeze out_d = loadmat(stream, struct_as_record=False) # previous error was with squeeze, with mat_struct out_d = loadmat(stream, struct_as_record=False, squeeze_me=True, ) def test_scalar_squeeze(): stream = BytesIO() in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}} savemat(stream, in_d) out_d = loadmat(stream, squeeze_me=True) assert_(isinstance(out_d['scalar'], float)) assert_(isinstance(out_d['string'], string_types)) assert_(isinstance(out_d['st'], np.ndarray)) def test_str_round(): # from report by Angus McMorland on mailing list 3 May 2010 stream = BytesIO() in_arr = np.array(['Hello', 'Foob']) out_arr = np.array(['Hello', 'Foob ']) savemat(stream, dict(a=in_arr)) res = loadmat(stream) # resulted in ['HloolFoa', 'elWrdobr'] assert_array_equal(res['a'], out_arr) stream.truncate(0) stream.seek(0) # Make Fortran ordered version of string in_str = in_arr.tostring(order='F') in_from_str = np.ndarray(shape=a.shape, dtype=in_arr.dtype, order='F', buffer=in_str) savemat(stream, dict(a=in_from_str)) assert_array_equal(res['a'], out_arr) # unicode save did lead to buffer too small error stream.truncate(0) stream.seek(0) in_arr_u = in_arr.astype('U') out_arr_u = out_arr.astype('U') savemat(stream, {'a': in_arr_u}) res = loadmat(stream) assert_array_equal(res['a'], out_arr_u) def test_fieldnames(): # Check that field names are as expected stream = BytesIO() savemat(stream, {'a': {'a':1, 'b':2}}) res = loadmat(stream) field_names = res['a'].dtype.names assert_equal(set(field_names), set(('a', 'b'))) def test_loadmat_varnames(): # Test that we can get just one variable from a mat file using loadmat mat5_sys_names = ['__globals__', '__header__', '__version__'] for eg_file, sys_v_names in ( (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin( test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)): vars = loadmat(eg_file) assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names)) vars = loadmat(eg_file, variable_names='a') assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) vars = loadmat(eg_file, variable_names=['a']) assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) vars = loadmat(eg_file, variable_names=['theta']) assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) vars = loadmat(eg_file, variable_names=('theta',)) assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) vars = loadmat(eg_file, variable_names=[]) assert_equal(set(vars.keys()), set(sys_v_names)) vnames = ['theta'] vars = loadmat(eg_file, variable_names=vnames) assert_equal(vnames, ['theta']) def test_round_types(): # Check that saving, loading preserves dtype in most cases arr = np.arange(10) stream = BytesIO() for dts in ('f8','f4','i8','i4','i2','i1', 'u8','u4','u2','u1','c16','c8'): stream.truncate(0) stream.seek(0) # needed for 
BytesIO in python 3 savemat(stream, {'arr': arr.astype(dts)}) vars = loadmat(stream) assert_equal(np.dtype(dts), vars['arr'].dtype) def test_varmats_from_mat(): # Make a mat file with several variables, write it, read it back names_vars = (('arr', mlarr(np.arange(10))), ('mystr', mlarr('a string')), ('mynum', mlarr(10))) # Dict like thing to give variables in defined order class C(object): def items(self): return names_vars stream = BytesIO() savemat(stream, C()) varmats = varmats_from_mat(stream) assert_equal(len(varmats), 3) for i in range(3): name, var_stream = varmats[i] exp_name, exp_res = names_vars[i] assert_equal(name, exp_name) res = loadmat(var_stream) assert_array_equal(res[name], exp_res) def test_one_by_zero(): # Test 1x0 chars get read correctly func_eg = pjoin(test_data_path, 'one_by_zero_char.mat') fp = open(func_eg, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_equal(d['var'].shape, (0,)) def test_load_mat4_le(): # We were getting byte order wrong when reading little-endian floa64 dense # matrices on big-endian platforms mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat') vars = loadmat(mat4_fname) assert_array_equal(vars['a'], [[0.1, 1.2]]) def test_unicode_mat4(): # Mat4 should save unicode as latin1 bio = BytesIO() var = {'second_cat': u('Schrödinger')} savemat(bio, var, format='4') var_back = loadmat(bio) assert_equal(var_back['second_cat'], var['second_cat']) def test_logical_sparse(): # Test we can read logical sparse stored in mat file as bytes. # See https://github.com/scipy/scipy/issues/3539. # In some files saved by MATLAB, the sparse data elements (Real Part # Subelement in MATLAB speak) are stored with apparent type double # (miDOUBLE) but are in fact single bytes. filename = pjoin(test_data_path,'logical_sparse.mat') # Before fix, this would crash with: # ValueError: indices and data should have the same size d = loadmat(filename, struct_as_record=True) log_sp = d['sp_log_5_4'] assert_(isinstance(log_sp, SP.csc_matrix)) assert_equal(log_sp.dtype.type, np.bool_) assert_array_equal(log_sp.toarray(), [[True, True, True, False], [False, False, True, False], [False, False, True, False], [False, False, False, False], [False, False, False, False]]) def test_empty_sparse(): # Can we read empty sparse matrices? sio = BytesIO() import scipy.sparse empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]]) savemat(sio, dict(x=empty_sparse)) sio.seek(0) res = loadmat(sio) assert_array_equal(res['x'].shape, empty_sparse.shape) assert_array_equal(res['x'].todense(), 0) # Do empty sparse matrices get written with max nnz 1? 
# See https://github.com/scipy/scipy/issues/4208 sio.seek(0) reader = MatFile5Reader(sio) reader.initialize_read() reader.read_file_header() hdr, _ = reader.read_var_header() assert_equal(hdr.nzmax, 1) def test_empty_mat_error(): # Test we get a specific warning for an empty mat file sio = BytesIO() assert_raises(MatReadError, loadmat, sio) def test_miuint32_compromise(): # Reader should accept miUINT32 for miINT32, but check signs # mat file with miUINT32 for miINT32, but OK values filename = pjoin(test_data_path, 'miuint32_for_miint32.mat') res = loadmat(filename) assert_equal(res['an_array'], np.arange(10)[None, :]) # mat file with miUINT32 for miINT32, with negative value filename = pjoin(test_data_path, 'bad_miuint32.mat') with warnings.catch_warnings(record=True): # Py3k ResourceWarning assert_raises(ValueError, loadmat, filename) def test_miutf8_for_miint8_compromise(): # Check reader accepts ascii as miUTF8 for array names filename = pjoin(test_data_path, 'miutf8_array_name.mat') res = loadmat(filename) assert_equal(res['array_name'], [[1]]) # mat file with non-ascii utf8 name raises error filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat') with warnings.catch_warnings(record=True): # Py3k ResourceWarning assert_raises(ValueError, loadmat, filename) def test_bad_utf8(): # Check that reader reads bad UTF with 'replace' option filename = pjoin(test_data_path,'broken_utf8.mat') res = loadmat(filename) assert_equal(res['bad_string'], b'\x80 am broken'.decode('utf8', 'replace')) if __name__ == "__main__": run_module_suite()
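The tests above repeat one core pattern: serialise a dict of variables to an in-memory MAT file with savemat, rewind the stream, and read it back with loadmat. A minimal self-contained sketch of that round trip follows; the helper name and the sample array are illustrative and not part of the original test suite.

import numpy as np
from io import BytesIO
from scipy.io import loadmat, savemat

def roundtrip(var_dict, **save_kwargs):
    # Write the variables to an in-memory MAT file, then load them back.
    stream = BytesIO()
    savemat(stream, var_dict, **save_kwargs)
    stream.seek(0)
    return loadmat(stream)

arr = np.arange(6).reshape(2, 3)
loaded = roundtrip({'arr': arr}, format='5')
assert np.array_equal(loaded['arr'], arr)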
bsd-3-clause
sschnug/pyeda
pyeda/parsing/pla.py
5
4136
""" PLA This is a partial implementation of the Berkeley PLA format. See extension/espresso/html/espresso.5.html for details. Exceptions: Error Interface Functions: parse """ # Disable 'no-name-in-module', b/c pylint can't look into C extensions # pylint: disable=E0611 import re from pyeda.boolalg.espresso import FTYPE, DTYPE, RTYPE _COMMENT = re.compile(r"^#.*$") _NINS = re.compile(r"^.i\s+(\d+)$") _NOUTS = re.compile(r"^.o\s+(\d+)$") _PROD = re.compile(r"^.p\s+(\d+)$") _ILB = re.compile(r"^.ilb\s+(\w+(?:\s+\w+)*)$") _OB = re.compile(r"^.ob\s+(\w+(?:\s+\w+)*)$") _TYPE = re.compile(r"^.type\s+(f|r|fd|fr|dr|fdr)$") _CUBE = re.compile(r"^([01-]+)\s+([01-]+)$") _END = re.compile(r"^.e(?:nd)?$") _TYPES = { 'f': FTYPE, 'r': RTYPE, 'fd': FTYPE | DTYPE, 'fr': FTYPE | RTYPE, 'dr': DTYPE | RTYPE, 'fdr': FTYPE | DTYPE | RTYPE, } _INCODE = {'0': 1, '1': 2, '-': 3} _OUTCODE = {'0': 0, '1': 1, '-': 2} class Error(Exception): """An error happened during parsing a PLA file""" def parse(s): """ Parse an input string in PLA format, and return an intermediate representation dict. Parameters ---------- s : str String containing a PLA. Returns ------- A dict with all PLA information: =============== ============ ================================= Key Value type Value description =============== ============ ================================= ninputs int Number of inputs noutputs int Number of outputs input_labels list Input variable names output_labels list Output function names intype int Cover type: {F, R, FD, FR, DR, FDR} cover set Implicant table =============== ============ ================================= """ d = dict(ninputs=None, noutputs=None, input_labels=None, output_labels=None, intype=None, cover=set()) lines = [line.strip() for line in s.splitlines()] for i, line in enumerate(lines, start=1): # skip comments if not line or _COMMENT.match(line): continue # .i m_in = _NINS.match(line) if m_in: if d['ninputs'] is None: d['ninputs'] = int(m_in.group(1)) continue else: raise Error(".i declared more than once") # .o m_out = _NOUTS.match(line) if m_out: if d['noutputs'] is None: d['noutputs'] = int(m_out.group(1)) continue else: raise Error(".o declared more than once") # ignore .p m_prod = _PROD.match(line) if m_prod: continue # .ilb m_ilb = _ILB.match(line) if m_ilb: if d['input_labels'] is None: d['input_labels'] = m_ilb.group(1).split() continue else: raise Error(".ilb declared more than once") # .ob m_ob = _OB.match(line) if m_ob: if d['output_labels'] is None: d['output_labels'] = m_ob.group(1).split() continue else: raise Error(".ob declared more than once") # .type m_type = _TYPE.match(line) if m_type: if d['intype'] is None: d['intype'] = _TYPES[m_type.group(1)] continue else: raise Error(".type declared more tha once") # cube m_cube = _CUBE.match(line) if m_cube: inputs, outputs = m_cube.groups() invec = tuple(_INCODE[c] for c in inputs) outvec = tuple(_OUTCODE[c] for c in outputs) d['cover'].add((invec, outvec)) continue # ignore .e m_end = _END.match(line) if m_end: continue raise Error("syntax error on line {}: {}".format(i, line)) return d
bsd-2-clause
watonyweng/horizon
openstack_dashboard/dashboards/project/images/images/urls.py
65
1264
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf.urls import patterns from django.conf.urls import url from openstack_dashboard.dashboards.project.images.images import views VIEWS_MOD = 'openstack_dashboard.dashboards.project.images.images.views' urlpatterns = patterns( VIEWS_MOD, url(r'^create/$', views.CreateView.as_view(), name='create'), url(r'^(?P<image_id>[^/]+)/update/$', views.UpdateView.as_view(), name='update'), url(r'^(?P<image_id>[^/]+)/$', views.DetailView.as_view(), name='detail'), )
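These three named routes are normally resolved elsewhere in the dashboard with reverse(). A hedged sketch follows: the namespace prefix depends on how the images panel includes this module, so the one shown is an assumption, and the image id is made up.

from django.core.urlresolvers import reverse

# Assumed namespace; the actual prefix comes from the including URLconf.
create_url = reverse('horizon:project:images:images:create')
detail_url = reverse('horizon:project:images:images:detail',
                     args=['11111111-2222-3333-4444-555555555555'])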
apache-2.0
janeloveless/mechanics-of-exploration
neuromech/util.py
1
11756
#! /usr/bin/env python import os import itertools as it import sys import textwrap #import gtk import numpy as np import sympy as sy import sympy.stats import odespy as ode import matplotlib import matplotlib.pyplot as plt import sympy.physics.mechanics as mech """ Pretty plotting code. """ _all_spines = ["top", "right", "bottom", "left"] def hide_spines(s=["top", "right"]): """Hides the top and rightmost axis spines from view for all active figures and their respective axes.""" global _all_spines # Retrieve a list of all current figures. figures = [x for x in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] for figure in figures: # Get all Axis instances related to the figure. for ax in figure.canvas.figure.get_axes(): for spine in _all_spines : if spine in s : ax.spines[spine].set_color('none') if "top" in s and "bottom" in s : ax.xaxis.set_ticks_position('none') elif "top" in s : ax.xaxis.set_ticks_position('bottom') elif "bottom" in s : ax.xaxis.set_ticks_position('top') else : ax.xaxis.set_ticks_position('both') if "left" in s and "right" in s : ax.yaxis.set_ticks_position('none') elif "left" in s : ax.yaxis.set_ticks_position('right') elif "right" in s : ax.yaxis.set_ticks_position('left') else : ax.yaxis.set_ticks_position('both') """ FORTRAN compilation code. """ def find_matching_parentheses(s, popen="(", pclose=")") : i_start = s.find(popen) i_end = -1 count = 0 s_frame = s[i_start:] for i in xrange(len(s_frame)) : char = s_frame[i] if char == popen : count += 1 elif char == pclose : count -= 1 if count == 0 : i_end = i + i_start + 1 break return i_start, i_end def parse_merge(H, s) : """ Parse the first FORTRAN merge statement found within s. H is the name of a hidden variable which will be used to store the value of the piecewise function defined by the merge statement. """ # extract bracketed code in merge statement from s # m_statement is of form "(expr1,expr2,cond)" i_merge_start = s.find("merge") ms = s[i_merge_start:] i_start, i_end = find_matching_parentheses(ms) m_statement = ms[i_start:i_end] # print m_statement # extract expr1, expr2, and conditional i1 = m_statement.find(",") i2 = m_statement.rfind(",") expr1 = m_statement[1:i1] expr2 = m_statement[i1 + 1:i2] cond = m_statement[i2 + 1:-1] # if expr1, expr2, or cond are merge statements, recursively call this # function otherwise, set the hidden switch variable to take the value of # the relevant expr if expr1.find("merge") != -1 : expr1_str = parse_merge(H, expr1)[-1] expr1_str = "".join([" " + s + "\n" for s in expr1_str.splitlines()]) else : expr1_str = " " + H + "=" + expr1 if expr2.find("merge") != -1 : expr2_str = parse_merge(H, expr2)[-1] expr2_str = "".join([" " + s + "\n" for s in expr2_str.splitlines()]) else : expr2_str = " " + H + "=" + expr2 # format expr1_str, expr2_str, and cond into a correct FORTRAN IF-THEN-ELSE # statement f_code = " IF (" + cond.strip() + ") THEN \n" + expr1_str + "\n" + \ " ELSE \n" + expr2_str + "\n" + \ " ENDIF \n" return i_merge_start, i_merge_start + i_end, f_code def FORTRAN_f(x, f, parameters=[], verbose=False) : """ Produce FORTRAN function for evaluating a vector-valued SymPy expression f given a state vector x. The FORTRAN function will have the signature f_f77(neq, t, X, Y) where neq is hidden and Y is an output matrix. 
""" # TODO remove code for dealing with stochastic systems -- it is not used in # this paper x = list(x) + list(parameters) f = list(f) + [0]*len(parameters) rv = list(set((np.concatenate([sy.stats.random_symbols(f_i) for f_i in f])))) NR = len(rv) if NR > 0 : x += [sy.symbols("dt"), sy.symbols("seed")] f += [0, 0] NX = len(x) NY = len(f) if NX != NY : raise Exception("System is not square!") if verbose : print "generating FORTRAN matrices..." _X = sy.tensor.IndexedBase("X", shape=(NX, )) X = [_X[i + 1] for i in xrange(NX)] _R = sy.tensor.IndexedBase("R", shape=(NR, )) R = [_R[i + 1] for i in xrange(NR)] if type(f) != sy.Matrix : f = sy.Matrix(f) # WARNING : These substitution steps are VERY SLOW!!! It might be wise to # parallelise them in the future, or at least substitute into one dynamical # equation at a time so that progress can be monitored. if verbose : print "substituting matrix elements for original state variables and parameters (WARNING: SLOW)..." f_sub = f.subs(zip(x, X)) if verbose : print "substituting matrix elements for random variables (WARNING: SLOW)..." f_sub = f_sub.subs(zip(rv, R)) # generate FORTRAN code if verbose : print "generating FORTRAN code from dynamics equations..." fstrs = [sy.fcode(fi, standard=95) for fi in f_sub] # remove whitespace and newlines if verbose : print "removing whitespace and newlines..." fstrs = ["".join(fi.split()) for fi in fstrs] # remove all @ (FORTRAN line continuation indicator) if verbose : print "removing line continuations..." fstrs = [fi.replace("@", "") for fi in fstrs] # find FORTRAN inline merge statements and replace with a hidden "switch" # variable whose value is set by a full IF statement at the start of the # function call. # -- this is needed because FORTRAN77 doesn't support inline merge statements Hstrs = [] # to hold hidden switch expressions if verbose : print "formatting piecewise functions..." for i in xrange(len(fstrs)) : while fstrs[i].find("merge") != -1 : H = "H(" + str(len(Hstrs) + 1) + ")" i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i]) fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:] Hstrs.append(Hstr) NH = len(Hstrs) # format the fstrs wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True, initial_indent=" ", subsequent_indent=" @ ", width=60) if verbose : print "formatting state equations..." for i in xrange(len(fstrs)) : fstrs[i] = wrapper.fill("Y(" + str(i + 1) + ")=" + fstrs[i]) + "\n" # put the above elements together into a FORTRAN subroutine if verbose : print "formatting preamble..." hdr = " subroutine f_f77(neq, t, X, Y) \n" +\ "Cf2py intent(hide) neq \n" +\ "Cf2py intent(out) Y \n" +\ " integer neq \n" +\ " double precision t, X, Y \n" +\ " dimension X(neq), Y(neq) \n" if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n" # TODO fix the following -- assumes dt = 0.01 # NOTE this is only important when dealing with stochastic systems if NR > 0 : hdr += " real, dimension(" + str(NR) + ") :: R \n" +\ " integer :: SEED \n" +\ " real :: RTRASH \n" +\ " SEED = INT((t/" + sy.fcode(X[-2]).strip() +\ ") + " + sy.fcode(X[-1]).strip() + ") \n" +\ " CALL SRAND(SEED) \n" +\ " DO i=1,4 \n" +\ " RTRASH=RAND(0) \n" +\ " END DO \n" R_block = "".join([sy.fcode(R_i) + "=RAND(0) \n" for R_i in R]) H_block = "".join(Hstrs) Y_block = "".join(fstrs) if verbose : print "assembling source code blocks..." fcode = hdr + R_block + H_block + Y_block + " return \n" + " end \n" # final formatting if verbose : print "final source code formatting..." 
wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True, initial_indent="", subsequent_indent=" @ ", width=60) fcode = "".join([wrapper.fill(src) + "\n" for src in fcode.split("\n")]) return fcode def FORTRAN_jacobian(x, jac, parameters=[]) : # TODO document # TODO remove this function if unused in paper NX = len(x) NP = len(parameters) Nrowpd = jac.shape[0] Ncolpd = jac.shape[1] if NX != Nrowpd != Ncolpd : raise Exception("System is not square!") _X = sy.tensor.IndexedBase("X", shape=(NX, )) X = [_X[i + 1] for i in xrange(NX)] X = X + [_X[NX + i + 1] for i in xrange(NP)] if type(jac) == sy.Matrix : jac = sy.Matrix(jac) jac_sub = jac.subs(zip(list(x) + list(parameters), X)) ijs = [i for i in it.product(xrange(Nrowpd), xrange(Ncolpd))] # generate FORTRAN code fstrs = [sy.fcode(jac_ij) for jac_ij in jac_sub] # remove whitespace and newlines fstrs = ["".join(jac_ij.split()) for jac_ij in fstrs] # remove all @ (FORTRAN line continuation indicator) fstrs = [jac_ij.replace("@", "") for jac_ij in fstrs] # find FORTRAN inline merge statements and replace with a hidden "switch" # variable whose value is set by a full IF statement at the start of the # function call. # -- this is needed because FORTRAN77 doesn't support inline merge statements Hstrs = [] # to hold hidden switch expressions for i in xrange(len(fstrs)) : while fstrs[i].find("merge") != -1 : H = "H(" + str(len(Hstrs) + 1) + ")" i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i]) fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:] Hstrs.append(Hstr) NH = len(Hstrs) # format the fstrs wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True, initial_indent=" ", subsequent_indent=" @ ", width=60) for k in xrange(len(fstrs)) : i, j = ijs[k] fstrs[k] = wrapper.fill("pd(" + str(i + 1) + "," + str(j + 1) + ")=" + fstrs[k]) + "\n" # put the above elements together into a FORTRAN subroutine hdr = " subroutine jac_f77(neq, t, X, ml, mu, pd, nrowpd) \n" +\ "Cf2py intent(hide) neq, ml, mu, nrowpd \n" +\ "Cf2py intent(out) pd \n" +\ " integer neq, ml, mu, nrowpd \n" +\ " double precision t, X, pd \n" +\ " dimension X(neq), pd(neq, neq) \n" if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n" H_block = "".join(Hstrs) pd_block = "".join(fstrs) fcode = hdr + H_block + pd_block + " return \n" + " end \n" return fcode def FORTRAN_compile(fcode) : f_f77 = ode.compile_f77(fcode) os.remove("tmp_callback.so") # reload(ode) return f_f77 """ Numerical integration code. """ def FORTRAN_integrate(t, x0, f, p0=[], jac=None, rtol=0.0001, atol=0.0001) : solver = ode.Lsodes(f=None, f_f77=f, jac_f77=jac, rtol=rtol, atol=atol) solver.set_initial_condition(list(x0) + list(p0)) x, _ = solver.solve(t) return x
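The three helpers above are meant to be chained: FORTRAN_f emits FORTRAN77 source for the right-hand side, FORTRAN_compile builds it with f2py through odespy, and FORTRAN_integrate runs the LSODES solver on the result. A hedged sketch of that workflow on an invented damped-oscillator system; the import path neuromech.util is assumed from this file's location.

import numpy as np
import sympy as sy

import neuromech.util as util

# Symbolic state (position q, velocity v) and a damped harmonic oscillator
# as the right-hand side dx/dt = f(x).
q, v = sy.symbols("q v")
x = [q, v]
f = [v, -q - 0.1*v]

fcode = util.FORTRAN_f(x, f, verbose=True)   # generate FORTRAN77 source
f_f77 = util.FORTRAN_compile(fcode)          # compile via f2py/odespy
t = np.linspace(0, 10, 1001)
sol = util.FORTRAN_integrate(t, [1.0, 0.0], f_f77)
# sol has shape (len(t), 2): columns are q(t) and v(t).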
unlicense
ms-iot/python
cpython/Lib/test/test_file.py
83
11367
import sys import os import unittest from array import array from weakref import proxy import io import _pyio as pyio from test.support import TESTFN, run_unittest from collections import UserList class AutoFileTests: # file tests for which a test file is automatically set up def setUp(self): self.f = self.open(TESTFN, 'wb') def tearDown(self): if self.f: self.f.close() os.remove(TESTFN) def testWeakRefs(self): # verify weak references p = proxy(self.f) p.write(b'teststring') self.assertEqual(self.f.tell(), p.tell()) self.f.close() self.f = None self.assertRaises(ReferenceError, getattr, p, 'tell') def testAttributes(self): # verify expected attributes exist f = self.f f.name # merely shouldn't blow up f.mode # ditto f.closed # ditto def testReadinto(self): # verify readinto self.f.write(b'12') self.f.close() a = array('b', b'x'*10) self.f = self.open(TESTFN, 'rb') n = self.f.readinto(a) self.assertEqual(b'12', a.tobytes()[:n]) def testReadinto_text(self): # verify readinto refuses text files a = array('b', b'x'*10) self.f.close() self.f = self.open(TESTFN, 'r') if hasattr(self.f, "readinto"): self.assertRaises(TypeError, self.f.readinto, a) def testWritelinesUserList(self): # verify writelines with instance sequence l = UserList([b'1', b'2']) self.f.writelines(l) self.f.close() self.f = self.open(TESTFN, 'rb') buf = self.f.read() self.assertEqual(buf, b'12') def testWritelinesIntegers(self): # verify writelines with integers self.assertRaises(TypeError, self.f.writelines, [1, 2, 3]) def testWritelinesIntegersUserList(self): # verify writelines with integers in UserList l = UserList([1,2,3]) self.assertRaises(TypeError, self.f.writelines, l) def testWritelinesNonString(self): # verify writelines with non-string object class NonString: pass self.assertRaises(TypeError, self.f.writelines, [NonString(), NonString()]) def testErrors(self): f = self.f self.assertEqual(f.name, TESTFN) self.assertTrue(not f.isatty()) self.assertTrue(not f.closed) if hasattr(f, "readinto"): self.assertRaises((OSError, TypeError), f.readinto, "") f.close() self.assertTrue(f.closed) def testMethods(self): methods = [('fileno', ()), ('flush', ()), ('isatty', ()), ('__next__', ()), ('read', ()), ('write', (b"",)), ('readline', ()), ('readlines', ()), ('seek', (0,)), ('tell', ()), ('write', (b"",)), ('writelines', ([],)), ('__iter__', ()), ] methods.append(('truncate', ())) # __exit__ should close the file self.f.__exit__(None, None, None) self.assertTrue(self.f.closed) for methodname, args in methods: method = getattr(self.f, methodname) # should raise on closed file self.assertRaises(ValueError, method, *args) # file is closed, __exit__ shouldn't do anything self.assertEqual(self.f.__exit__(None, None, None), None) # it must also return None if an exception was given try: 1/0 except: self.assertEqual(self.f.__exit__(*sys.exc_info()), None) def testReadWhenWriting(self): self.assertRaises(OSError, self.f.read) class CAutoFileTests(AutoFileTests, unittest.TestCase): open = io.open class PyAutoFileTests(AutoFileTests, unittest.TestCase): open = staticmethod(pyio.open) class OtherFileTests: def testModeStrings(self): # check invalid mode strings for mode in ("", "aU", "wU+"): try: f = self.open(TESTFN, mode) except ValueError: pass else: f.close() self.fail('%r is an invalid file mode' % mode) def testBadModeArgument(self): # verify that we get a sensible error message for bad mode argument bad_mode = "qwerty" try: f = self.open(TESTFN, bad_mode) except ValueError as msg: if msg.args[0] != 0: s = str(msg) if TESTFN in s 
or bad_mode not in s: self.fail("bad error message for invalid mode: %s" % s) # if msg.args[0] == 0, we're probably on Windows where there may be # no obvious way to discover why open() failed. else: f.close() self.fail("no error for invalid mode: %s" % bad_mode) def testSetBufferSize(self): # make sure that explicitly setting the buffer size doesn't cause # misbehaviour especially with repeated close() calls for s in (-1, 0, 1, 512): try: f = self.open(TESTFN, 'wb', s) f.write(str(s).encode("ascii")) f.close() f.close() f = self.open(TESTFN, 'rb', s) d = int(f.read().decode("ascii")) f.close() f.close() except OSError as msg: self.fail('error setting buffer size %d: %s' % (s, str(msg))) self.assertEqual(d, s) def testTruncateOnWindows(self): # SF bug <http://www.python.org/sf/801631> # "file.truncate fault on windows" os.unlink(TESTFN) f = self.open(TESTFN, 'wb') try: f.write(b'12345678901') # 11 bytes f.close() f = self.open(TESTFN,'rb+') data = f.read(5) if data != b'12345': self.fail("Read on file opened for update failed %r" % data) if f.tell() != 5: self.fail("File pos after read wrong %d" % f.tell()) f.truncate() if f.tell() != 5: self.fail("File pos after ftruncate wrong %d" % f.tell()) f.close() size = os.path.getsize(TESTFN) if size != 5: self.fail("File size after ftruncate wrong %d" % size) finally: f.close() os.unlink(TESTFN) def testIteration(self): # Test the complex interaction when mixing file-iteration and the # various read* methods. dataoffset = 16384 filler = b"ham\n" assert not dataoffset % len(filler), \ "dataoffset must be multiple of len(filler)" nchunks = dataoffset // len(filler) testlines = [ b"spam, spam and eggs\n", b"eggs, spam, ham and spam\n", b"saussages, spam, spam and eggs\n", b"spam, ham, spam and eggs\n", b"spam, spam, spam, spam, spam, ham, spam\n", b"wonderful spaaaaaam.\n" ] methods = [("readline", ()), ("read", ()), ("readlines", ()), ("readinto", (array("b", b" "*100),))] try: # Prepare the testfile bag = self.open(TESTFN, "wb") bag.write(filler * nchunks) bag.writelines(testlines) bag.close() # Test for appropriate errors mixing read* and iteration for methodname, args in methods: f = self.open(TESTFN, 'rb') if next(f) != filler: self.fail, "Broken testfile" meth = getattr(f, methodname) meth(*args) # This simply shouldn't fail f.close() # Test to see if harmless (by accident) mixing of read* and # iteration still works. This depends on the size of the internal # iteration buffer (currently 8192,) but we can test it in a # flexible manner. Each line in the bag o' ham is 4 bytes # ("h", "a", "m", "\n"), so 4096 lines of that should get us # exactly on the buffer boundary for any power-of-2 buffersize # between 4 and 16384 (inclusive). f = self.open(TESTFN, 'rb') for i in range(nchunks): next(f) testline = testlines.pop(0) try: line = f.readline() except ValueError: self.fail("readline() after next() with supposedly empty " "iteration-buffer failed anyway") if line != testline: self.fail("readline() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) testline = testlines.pop(0) buf = array("b", b"\x00" * len(testline)) try: f.readinto(buf) except ValueError: self.fail("readinto() after next() with supposedly empty " "iteration-buffer failed anyway") line = buf.tobytes() if line != testline: self.fail("readinto() after next() with empty buffer " "failed. 
Got %r, expected %r" % (line, testline)) testline = testlines.pop(0) try: line = f.read(len(testline)) except ValueError: self.fail("read() after next() with supposedly empty " "iteration-buffer failed anyway") if line != testline: self.fail("read() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) try: lines = f.readlines() except ValueError: self.fail("readlines() after next() with supposedly empty " "iteration-buffer failed anyway") if lines != testlines: self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) f.close() # Reading after iteration hit EOF shouldn't hurt either f = self.open(TESTFN, 'rb') try: for line in f: pass try: f.readline() f.readinto(buf) f.read() f.readlines() except ValueError: self.fail("read* failed after next() consumed file") finally: f.close() finally: os.unlink(TESTFN) class COtherFileTests(OtherFileTests, unittest.TestCase): open = io.open class PyOtherFileTests(OtherFileTests, unittest.TestCase): open = staticmethod(pyio.open) def tearDownModule(): # Historically, these tests have been sloppy about removing TESTFN. # So get rid of it no matter what. if os.path.exists(TESTFN): os.unlink(TESTFN) if __name__ == '__main__': unittest.main()
bsd-3-clause