Dataset columns:
  repo_name: string (length 5 to 100)
  path: string (length 4 to 375)
  copies: string class (991 distinct values)
  size: string (length 4 to 7)
  content: string (length 666 to 1M)
  license: string class (15 distinct values)
repo_name: rmccoy7541/egillettii-rnaseq
path: scripts/snp_performance_analysis.py
copies: 1
size: 3682
content:
#! /bin/env python

import sys
from optparse import OptionParser
import copy
import matplotlib
matplotlib.use('Agg')
import pylab
import scipy.optimize
import numpy
from numpy import array
import dadi
import os

# call ms program from within dadi, using optimized parameters (converted to ms units)
core = "-n 1 0.922 -n 2 0.104 -ej 0.0330 2 1 -en 0.0330 1 1"
command = dadi.Misc.ms_command(100000, (12,12), core, 1, 2000)
ms_fs = dadi.Spectrum.from_ms_file(os.popen(command))

# modify the following line to adjust the sample size of SNPs used for inference
scaled_ms_fs = ms_fs.fixed_size_sample(2000)
scaled_ms_fs = scaled_ms_fs.fold()

# import demographic models
import gillettii_models

def runModel(outFile, nuW_start, nuC_start, T_start):
    # Extract the spectrum from ms output
    fs = scaled_ms_fs
    ns = fs.sample_sizes
    print 'sample sizes:', ns

    # These are the grid point settings we will use for extrapolation. It is
    # suggested that the smallest grid be slightly larger than the largest
    # sample size, but this may take a long time.
    pts_l = [20, 30, 40]

    # bottleneck_split model
    func = gillettii_models.bottleneck_split
    params = array([nuW_start, nuC_start, T_start])
    upper_bound = [30, 10, 10]
    lower_bound = [1e-5, 1e-10, 0]

    # Make the extrapolating version of the demographic model function.
    func_ex = dadi.Numerics.make_extrap_func(func)
    # Calculate the model AFS
    model = func_ex(params, ns, pts_l)
    # Likelihood of the data given the model AFS.
    ll_model = dadi.Inference.ll_multinom(model, fs)
    print 'Model log-likelihood:', ll_model, "\n"
    # The optimal value of theta given the model.
    theta = dadi.Inference.optimal_sfs_scaling(model, fs)

    p0 = dadi.Misc.perturb_params(params, fold=1, lower_bound=lower_bound, upper_bound=upper_bound)
    print 'perturbed parameters: ', p0, "\n"
    popt = dadi.Inference.optimize_log_fmin(p0, fs, func_ex, pts_l,
                                            upper_bound=upper_bound,
                                            lower_bound=lower_bound,
                                            maxiter=None, verbose=len(params))
    print 'Optimized parameters:', repr(popt), "\n"

    # use the optimized parameters in a new model to try to get the parameters to converge
    new_model = func_ex(popt, ns, pts_l)
    ll_opt = dadi.Inference.ll_multinom(new_model, fs)
    print 'Optimized log-likelihood:', ll_opt, "\n"

    # Write the parameters and log-likelihood to the outFile
    s = str(nuW_start) + '\t' + str(nuC_start) + '\t' + str(T_start) + '\t'
    for i in range(0, len(popt)):
        s += str(popt[i]) + '\t'
    s += str(ll_opt) + '\n'
    outFile.write(s)

#################

def mkOptionParser():
    """ Defines options and returns parser """

    usage = """%prog <outFN> <nuW_start> <nuC_start> <T_start>
    %prog performs demographic inference on gillettii RNA-seq data. """
    parser = OptionParser(usage)
    return parser

def main():
    """ see usage in mkOptionParser. """
    parser = mkOptionParser()
    options, args = parser.parse_args()

    if len(args) != 4:
        parser.error("Incorrect number of arguments")

    outFN = args[0]
    nuW_start = float(args[1])
    nuC_start = float(args[2])
    T_start = float(args[3])

    if outFN == '-':
        outFile = sys.stdout
    else:
        outFile = open(outFN, 'a')

    runModel(outFile, nuW_start, nuC_start, T_start)

# run main
if __name__ == '__main__':
    main()
license: mit
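The script above consumes four positional arguments: an output file followed by three starting values. A minimal driver sketch for sweeping starting points, assuming the file is saved at the recorded path and dadi is installed; the output filename and starting values here are illustrative, not part of the repo:

import subprocess

# Hypothetical sweep over starting parameter values; each run appends one
# optimized-parameter row to results.tsv via the script's open(outFN, 'a').
starts = [(1.0, 0.1, 0.03), (0.5, 0.5, 0.05)]
for nuW, nuC, T in starts:
    subprocess.call(["python", "scripts/snp_performance_analysis.py",
                     "results.tsv", str(nuW), str(nuC), str(T)])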
repo_name: robobrobro/ballin-octo-shame
path: lib/Python-3.4.3/Lib/encodings/utf_32.py
copies: 180
size: 5128
content:
""" Python 'utf-32' Codec """ import codecs, sys ### Codec APIs encode = codecs.utf_32_encode def decode(input, errors='strict'): return codecs.utf_32_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): codecs.IncrementalEncoder.__init__(self, errors) self.encoder = None def encode(self, input, final=False): if self.encoder is None: result = codecs.utf_32_encode(input, self.errors)[0] if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode return result return self.encoder(input, self.errors)[0] def reset(self): codecs.IncrementalEncoder.reset(self) self.encoder = None def getstate(self): # state info we return to the caller: # 0: stream is in natural order for this platform # 2: endianness hasn't been determined yet # (we're never writing in unnatural order) return (2 if self.encoder is None else 0) def setstate(self, state): if state: self.encoder = None else: if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode class IncrementalDecoder(codecs.BufferedIncrementalDecoder): def __init__(self, errors='strict'): codecs.BufferedIncrementalDecoder.__init__(self, errors) self.decoder = None def _buffer_decode(self, input, errors, final): if self.decoder is None: (output, consumed, byteorder) = \ codecs.utf_32_ex_decode(input, errors, 0, final) if byteorder == -1: self.decoder = codecs.utf_32_le_decode elif byteorder == 1: self.decoder = codecs.utf_32_be_decode elif consumed >= 4: raise UnicodeError("UTF-32 stream does not start with BOM") return (output, consumed) return self.decoder(input, self.errors, final) def reset(self): codecs.BufferedIncrementalDecoder.reset(self) self.decoder = None def getstate(self): # additonal state info from the base class must be None here, # as it isn't passed along to the caller state = codecs.BufferedIncrementalDecoder.getstate(self)[0] # additional state info we pass to the caller: # 0: stream is in natural order for this platform # 1: stream is in unnatural order # 2: endianness hasn't been determined yet if self.decoder is None: return (state, 2) addstate = int((sys.byteorder == "big") != (self.decoder is codecs.utf_32_be_decode)) return (state, addstate) def setstate(self, state): # state[1] will be ignored by BufferedIncrementalDecoder.setstate() codecs.BufferedIncrementalDecoder.setstate(self, state) state = state[1] if state == 0: self.decoder = (codecs.utf_32_be_decode if sys.byteorder == "big" else codecs.utf_32_le_decode) elif state == 1: self.decoder = (codecs.utf_32_le_decode if sys.byteorder == "big" else codecs.utf_32_be_decode) else: self.decoder = None class StreamWriter(codecs.StreamWriter): def __init__(self, stream, errors='strict'): self.encoder = None codecs.StreamWriter.__init__(self, stream, errors) def reset(self): codecs.StreamWriter.reset(self) self.encoder = None def encode(self, input, errors='strict'): if self.encoder is None: result = codecs.utf_32_encode(input, errors) if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode return result else: return self.encoder(input, errors) class StreamReader(codecs.StreamReader): def reset(self): codecs.StreamReader.reset(self) try: del self.decode except AttributeError: pass def decode(self, input, errors='strict'): (object, consumed, byteorder) = \ codecs.utf_32_ex_decode(input, errors, 0, False) if byteorder == -1: self.decode = 
codecs.utf_32_le_decode elif byteorder == 1: self.decode = codecs.utf_32_be_decode elif consumed>=4: raise UnicodeError("UTF-32 stream does not start with BOM") return (object, consumed) ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-32', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
license: mit
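A quick standalone sketch of what the incremental decoder above does: a BOM-prefixed UTF-32 stream is fed in two arbitrary chunks, the byte order is detected from the BOM, and the partial code unit is buffered until more bytes arrive (Python 3):

import codecs

data = "hi".encode("utf-32")                  # 4-byte BOM + two 4-byte code points
dec = codecs.getincrementaldecoder("utf-32")()
# First chunk ends mid code unit; the decoder buffers the 2 leftover bytes.
text = dec.decode(data[:6]) + dec.decode(data[6:], final=True)
assert text == "hi"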
repo_name: andela-bojengwa/talk
path: venv/lib/python2.7/site-packages/rest_framework/viewsets.py
copies: 21
size: 5303
content:
""" ViewSets are essentially just a type of class based view, that doesn't provide any method handlers, such as `get()`, `post()`, etc... but instead has actions, such as `list()`, `retrieve()`, `create()`, etc... Actions are only bound to methods at the point of instantiating the views. user_list = UserViewSet.as_view({'get': 'list'}) user_detail = UserViewSet.as_view({'get': 'retrieve'}) Typically, rather than instantiate views from viewsets directly, you'll register the viewset with a router and let the URL conf be determined automatically. router = DefaultRouter() router.register(r'users', UserViewSet, 'user') urlpatterns = router.urls """ from __future__ import unicode_literals from functools import update_wrapper from django.utils.decorators import classonlymethod from django.views.decorators.csrf import csrf_exempt from rest_framework import views, generics, mixins class ViewSetMixin(object): """ This is the magic. Overrides `.as_view()` so that it takes an `actions` keyword that performs the binding of HTTP methods to actions on the Resource. For example, to create a concrete view binding the 'GET' and 'POST' methods to the 'list' and 'create' actions... view = MyViewSet.as_view({'get': 'list', 'post': 'create'}) """ @classonlymethod def as_view(cls, actions=None, **initkwargs): """ Because of the way class based views create a closure around the instantiated view, we need to totally reimplement `.as_view`, and slightly modify the view function that is created and returned. """ # The suffix initkwarg is reserved for identifying the viewset type # eg. 'List' or 'Instance'. cls.suffix = None # actions must not be empty if not actions: raise TypeError("The `actions` argument must be provided when " "calling `.as_view()` on a ViewSet. For example " "`.as_view({'get': 'list'})`") # sanitize keyword arguments for key in initkwargs: if key in cls.http_method_names: raise TypeError("You tried to pass in the %s method name as a " "keyword argument to %s(). Don't do that." % (key, cls.__name__)) if not hasattr(cls, key): raise TypeError("%s() received an invalid keyword %r" % ( cls.__name__, key)) def view(request, *args, **kwargs): self = cls(**initkwargs) # We also store the mapping of request methods to actions, # so that we can later set the action attribute. # eg. `self.action = 'list'` on an incoming GET request. self.action_map = actions # Bind methods to actions # This is the bit that's different to a standard view for method, action in actions.items(): handler = getattr(self, action) setattr(self, method, handler) # Patch this in as it's otherwise only present from 1.5 onwards if hasattr(self, 'get') and not hasattr(self, 'head'): self.head = self.get # And continue as usual return self.dispatch(request, *args, **kwargs) # take name and docstring from class update_wrapper(view, cls, updated=()) # and possible attributes set by decorators # like csrf_exempt from dispatch update_wrapper(view, cls.dispatch, assigned=()) # We need to set these on the view function, so that breadcrumb # generation can pick out these bits of information from a # resolved URL. view.cls = cls view.suffix = initkwargs.get('suffix', None) return csrf_exempt(view) def initialize_request(self, request, *args, **kwargs): """ Set the `.action` attribute on the view, depending on the request method. 
""" request = super(ViewSetMixin, self).initialize_request(request, *args, **kwargs) self.action = self.action_map.get(request.method.lower()) return request class ViewSet(ViewSetMixin, views.APIView): """ The base ViewSet class does not provide any actions by default. """ pass class GenericViewSet(ViewSetMixin, generics.GenericAPIView): """ The GenericViewSet class does not provide any actions by default, but does include the base set of generic view behavior, such as the `get_object` and `get_queryset` methods. """ pass class ReadOnlyModelViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet): """ A viewset that provides default `list()` and `retrieve()` actions. """ pass class ModelViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, mixins.ListModelMixin, GenericViewSet): """ A viewset that provides default `create()`, `retrieve()`, `update()`, `partial_update()`, `destroy()` and `list()` actions. """ pass
license: mit
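To make the module docstring's two wiring styles concrete, here is a self-contained sketch that runs inside a configured Django project; the `UserSerializer` and the choice of `django.contrib.auth` `User` are illustrative assumptions, not part of rest_framework:

from django.contrib.auth.models import User
from rest_framework import serializers, viewsets
from rest_framework.routers import DefaultRouter

class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ('id', 'username')

class UserViewSet(viewsets.ModelViewSet):
    queryset = User.objects.all()
    serializer_class = UserSerializer

# explicit binding of HTTP verbs to actions
user_list = UserViewSet.as_view({'get': 'list', 'post': 'create'})

# or let a router generate the URL conf
router = DefaultRouter()
router.register(r'users', UserViewSet, 'user')
urlpatterns = router.urls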
repo_name: mit0110/oppia
path: core/tests/test_util_jobs.py
copies: 19
size: 4472
content:
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Jobs operating on explorations that can be used for production tests.

To use these jobs, you first need to register them in jobs_registry (at the
moment they are not displayed there to avoid accidental use)."""

from core import jobs
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.platform import models
import feconf

(base_models, exp_models,) = models.Registry.import_models([
    models.NAMES.base_model, models.NAMES.exploration])


class ExpCopiesRealtimeModel(
        jobs.BaseRealtimeDatastoreClassForContinuousComputations):
    pass


class ExpCopiesAggregator(jobs.BaseContinuousComputationManager):
    """A continuous-computation job creating 10 published copies of every
    existing exploration, with the eid being '[old_eid]copy[copy_number]',
    title 'Copy' and category 'Copies'.
    """
    @classmethod
    def get_event_types_listened_to(cls):
        return []

    @classmethod
    def _get_realtime_datastore_class(cls):
        return ExpCopiesRealtimeModel

    @classmethod
    def _get_batch_job_manager_class(cls):
        return ExpCopiesMRJobManager

    @classmethod
    def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
        pass


class ExpCopiesMRJobManager(
        jobs.BaseMapReduceJobManagerForContinuousComputations):
    """A continuous-computation job creating 10 published copies of every
    existing exploration, with the eid being '[old_eid]copy[copy_number]',
    title 'Copy' and category 'Copies'.
    """
    @classmethod
    def _get_continuous_computation_class(cls):
        return ExpCopiesAggregator

    @classmethod
    def entity_classes_to_map_over(cls):
        return [exp_models.ExplorationModel]

    @staticmethod
    def map(item):
        if ExpCopiesMRJobManager._entity_created_before_job_queued(item):
            for count in range(10):
                yield ('%scopy%d' % (item.id, count),
                       exp_services.get_exploration_from_model(item).to_yaml())

    @staticmethod
    def reduce(exp_id, list_of_exps):
        for stringified_exp in list_of_exps:
            exploration = exp_domain.Exploration.from_untitled_yaml(
                exp_id, 'Copy', 'Copies', stringified_exp)
            exp_services.save_new_exploration(
                feconf.SYSTEM_COMMITTER_ID, exploration)
            rights_manager.publish_exploration(
                feconf.SYSTEM_COMMITTER_ID, exp_id)


# Job to delete all copied explorations.
class DeleteExpCopiesRealtimeModel(
        jobs.BaseRealtimeDatastoreClassForContinuousComputations):
    pass


class DeleteExpCopiesAggregator(jobs.BaseContinuousComputationManager):
    """A continuous-computation job deleting all explorations in category
    'Copies'.
    """
    @classmethod
    def get_event_types_listened_to(cls):
        return []

    @classmethod
    def _get_realtime_datastore_class(cls):
        return DeleteExpCopiesRealtimeModel

    @classmethod
    def _get_batch_job_manager_class(cls):
        return DeleteExpCopiesMRJobManager

    @classmethod
    def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
        pass


class DeleteExpCopiesMRJobManager(
        jobs.BaseMapReduceJobManagerForContinuousComputations):
    """Job that deletes all explorations in category 'Copies'.
    """
    @classmethod
    def _get_continuous_computation_class(cls):
        return DeleteExpCopiesAggregator

    @classmethod
    def entity_classes_to_map_over(cls):
        return [exp_models.ExplorationModel]

    @staticmethod
    def map(item):
        if item.category == 'Copies':
            exp_services.delete_exploration(
                feconf.SYSTEM_COMMITTER_ID, item.id, force_deletion=True)

    @staticmethod
    def reduce(exp_id, list_of_exps):
        pass
license: apache-2.0
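Per the module docstring, these managers are only usable once registered. A hedged sketch of what that registration might look like; the registry attribute name `ALL_CONTINUOUS_COMPUTATION_MANAGERS` is an assumption about jobs_registry, so verify it against the actual module before relying on it:

from core import jobs_registry
from core.tests import test_util_jobs

# Assumed registry list; check the real attribute names in jobs_registry.
jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS.append(
    test_util_jobs.ExpCopiesAggregator)
jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS.append(
    test_util_jobs.DeleteExpCopiesAggregator)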
repo_name: dakerfp/AutobahnPython
path: examples/twisted/wamp/basic/rpc/timeservice/backend.py
copies: 8
size: 1139
content:
###############################################################################
##
##  Copyright (C) 2014 Tavendo GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################

import datetime

from autobahn.twisted.wamp import ApplicationSession


class Component(ApplicationSession):
    """
    A simple time service application component.
    """

    def onJoin(self, details):

        def utcnow():
            now = datetime.datetime.utcnow()
            return now.strftime("%Y-%m-%dT%H:%M:%SZ")

        self.register(utcnow, 'com.timeservice.now')
license: apache-2.0
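A minimal runner sketch for the component above; the router URL and realm are assumptions about a local development router (for example a WAMP router listening on port 8080), not values taken from this example's repo:

from autobahn.twisted.wamp import ApplicationRunner

if __name__ == '__main__':
    # Connects to the (assumed) local router and starts the session,
    # after which 'com.timeservice.now' is callable by other clients.
    runner = ApplicationRunner("ws://127.0.0.1:8080/ws", "realm1")
    runner.run(Component)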
repo_name: jelugbo/hebs_master
path: lms/djangoapps/licenses/tests.py
copies: 30
size: 9472
content:
"""Tests for License package""" import logging import json from uuid import uuid4 from random import shuffle from tempfile import NamedTemporaryFile import factory from factory.django import DjangoModelFactory from django.test import TestCase from django.test.client import Client from django.test.utils import override_settings from django.core.management import call_command from django.core.urlresolvers import reverse from nose.tools import assert_true # pylint: disable=E0611 from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE from licenses.models import CourseSoftware, UserLicense from student.tests.factories import UserFactory from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase COURSE_1 = 'edX/toy/2012_Fall' SOFTWARE_1 = 'matlab' SOFTWARE_2 = 'stata' SERIAL_1 = '123456abcde' log = logging.getLogger(__name__) class CourseSoftwareFactory(DjangoModelFactory): '''Factory for generating CourseSoftware objects in database''' FACTORY_FOR = CourseSoftware name = SOFTWARE_1 full_name = SOFTWARE_1 url = SOFTWARE_1 course_id = COURSE_1 class UserLicenseFactory(DjangoModelFactory): ''' Factory for generating UserLicense objects in database By default, the user assigned is null, indicating that the serial number has not yet been assigned. ''' FACTORY_FOR = UserLicense user = None software = factory.SubFactory(CourseSoftwareFactory) serial = SERIAL_1 class LicenseTestCase(TestCase): '''Tests for licenses.views''' def setUp(self): '''creates a user and logs in''' # self.setup_viewtest_user() self.user = UserFactory(username='test', email='[email protected]', password='test_password') self.client = Client() assert_true(self.client.login(username='test', password='test_password')) self.software = CourseSoftwareFactory() def test_get_license(self): UserLicenseFactory(user=self.user, software=self.software) response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 'generate': 'false'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) self.assertEqual(200, response.status_code) json_returned = json.loads(response.content) self.assertFalse('error' in json_returned) self.assertTrue('serial' in json_returned) self.assertEquals(json_returned['serial'], SERIAL_1) def test_get_nonexistent_license(self): response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 'generate': 'false'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) self.assertEqual(200, response.status_code) json_returned = json.loads(response.content) self.assertFalse('serial' in json_returned) self.assertTrue('error' in json_returned) def test_create_nonexistent_license(self): '''Should not assign a license to an unlicensed user when none are available''' response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 'generate': 'true'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) self.assertEqual(200, response.status_code) json_returned = json.loads(response.content) self.assertFalse('serial' in json_returned) self.assertTrue('error' in json_returned) def test_create_license(self): '''Should assign a license to an unlicensed user if one is unassigned''' # create an unassigned license UserLicenseFactory(software=self.software) response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 
'generate': 'true'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) self.assertEqual(200, response.status_code) json_returned = json.loads(response.content) self.assertFalse('error' in json_returned) self.assertTrue('serial' in json_returned) self.assertEquals(json_returned['serial'], SERIAL_1) def test_get_license_from_wrong_course(self): response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 'generate': 'false'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format('some/other/course')) self.assertEqual(404, response.status_code) def test_get_license_from_non_ajax(self): response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 'generate': 'false'}, HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) self.assertEqual(404, response.status_code) def test_get_license_without_software(self): response = self.client.post(reverse('user_software_license'), {'generate': 'false'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) self.assertEqual(404, response.status_code) def test_get_license_without_login(self): self.client.logout() response = self.client.post(reverse('user_software_license'), {'software': SOFTWARE_1, 'generate': 'false'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1)) # if we're not logged in, we should be referred to the login page self.assertEqual(302, response.status_code) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class CommandTest(ModuleStoreTestCase): '''Test management command for importing serial numbers''' def setUp(self): course = CourseFactory.create() self.course_id = course.id def test_import_serial_numbers(self): size = 20 log.debug('Adding one set of serials for {0}'.format(SOFTWARE_1)) with generate_serials_file(size) as temp_file: args = [self.course_id.to_deprecated_string(), SOFTWARE_1, temp_file.name] call_command('import_serial_numbers', *args) log.debug('Adding one set of serials for {0}'.format(SOFTWARE_2)) with generate_serials_file(size) as temp_file: args = [self.course_id.to_deprecated_string(), SOFTWARE_2, temp_file.name] call_command('import_serial_numbers', *args) log.debug('There should be only 2 course-software entries') software_count = CourseSoftware.objects.all().count() self.assertEqual(2, software_count) log.debug('We added two sets of {0} serials'.format(size)) licenses_count = UserLicense.objects.all().count() self.assertEqual(2 * size, licenses_count) log.debug('Adding more serial numbers to {0}'.format(SOFTWARE_1)) with generate_serials_file(size) as temp_file: args = [self.course_id.to_deprecated_string(), SOFTWARE_1, temp_file.name] call_command('import_serial_numbers', *args) log.debug('There should be still only 2 course-software entries') software_count = CourseSoftware.objects.all().count() self.assertEqual(2, software_count) log.debug('Now we should have 3 sets of 20 serials'.format(size)) licenses_count = UserLicense.objects.all().count() self.assertEqual(3 * size, licenses_count) software = CourseSoftware.objects.get(pk=1) lics = UserLicense.objects.filter(software=software)[:size] known_serials = list(l.serial for l in lics) known_serials.extend(generate_serials(10)) shuffle(known_serials) log.debug('Adding some new and old serials to {0}'.format(SOFTWARE_1)) with NamedTemporaryFile() as tmpfile: tmpfile.write('\n'.join(known_serials)) tmpfile.flush() args = 
[self.course_id.to_deprecated_string(), SOFTWARE_1, tmpfile.name] call_command('import_serial_numbers', *args) log.debug('Check if we added only the new ones') licenses_count = UserLicense.objects.filter(software=software).count() self.assertEqual((2 * size) + 10, licenses_count) def generate_serials(size=20): '''generate a list of serial numbers''' return [str(uuid4()) for _ in range(size)] def generate_serials_file(size=20): '''output list of generated serial numbers to a temp file''' serials = generate_serials(size) temp_file = NamedTemporaryFile() temp_file.write('\n'.join(serials)) temp_file.flush() return temp_file
license: agpl-3.0
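The serial-file helpers at the bottom of the module are self-contained; a quick sketch of the fixture they produce (Python 2 to match the module, where NamedTemporaryFile accepts str directly):

from uuid import uuid4
from tempfile import NamedTemporaryFile

def generate_serials(size=20):
    return [str(uuid4()) for _ in range(size)]

# One serial number per line, flushed so another process can read the file.
temp_file = NamedTemporaryFile()
temp_file.write('\n'.join(generate_serials(5)))
temp_file.flush()
print open(temp_file.name).read()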
repo_name: ttroy50/vsid
path: tools/reset_protocol_model.py
copies: 1
size: 2350
content:
#!/usr/bin/python
#
"""
Reset a Protocol in the database to 0
"""

import sys
import yaml
from optparse import OptionParser


def reset_protocol(file, dest, name, attributes=None):
    try:
        with open(file, 'r') as stream:
            database = yaml.load(stream)
    except Exception, ex:
        print "Exception loading db file : %s" % ex
        sys.exit(1)

    if database is None:
        print "Unable to load yaml from %s" % file
        sys.exit(1)

    for proto in database["ProtocolModels"]:
        if proto["ProtocolName"] == name:
            print "resetting %s" % proto["ProtocolName"]
            for meter in proto["AttributeMeters"]:
                if attributes is not None:
                    if meter["AttributeName"] not in attributes:
                        continue
                print "resetting %s" % meter["AttributeName"]
                num = len(meter["FingerPrint"])
                for val in range(0, num):
                    meter["FingerPrint"][val] = 0

    if dest is not None:
        with open(dest, 'w') as outfile:
            outfile.write(yaml.dump(database, default_flow_style=True, explicit_start=True))
    else:
        print yaml.dump(database, default_flow_style=True, explicit_start=True)


def main():
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
                      help="Database file to load", metavar="FILE")
    parser.add_option("-d", "--dest", dest="destfile",
                      help="Database file to write to. If not supplied will write to stdout",
                      metavar="FILE")
    parser.add_option("-n", "--name", dest="name",
                      help="Protocol Name", metavar="name")
    parser.add_option("-a", "--attribute", action="append", dest="attributes",
                      help="Attributes to reset. Not adding this means all")

    (options, args) = parser.parse_args()

    if options.filename is None or options.filename == "":
        print "ERROR: No Database file supplied\n"
        parser.print_help()
        sys.exit(1)

    if options.name is None or options.name == "":
        print "ERROR: No Name\n"
        parser.print_help()
        sys.exit(1)

    reset_protocol(options.filename, options.destfile, options.name, options.attributes)


if __name__ == "__main__":
    # execute only if run as a script
    main()
license: mit
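A hedged sketch of calling the function above from another script rather than via the CLI; the database filenames and the protocol/attribute names are made-up examples, and the import assumes tools/ is on the Python path:

from reset_protocol_model import reset_protocol

# Zero out one attribute meter of the (hypothetical) HTTP protocol model,
# writing the result to a new file instead of stdout.
reset_protocol("vsid_db.yaml", "vsid_db_reset.yaml", "HTTP",
               attributes=["DestinationPort"])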
repo_name: SEL-Columbia/commcare-hq
path: corehq/apps/reports/tests/test_cache.py
copies: 1
size: 5210
content:
import uuid
from django.http import HttpRequest
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.reports.cache import CacheableRequestMixIn, request_cache
from corehq.apps.users.models import WebUser


class MockReport(CacheableRequestMixIn):
    def __init__(self, request, is_cacheable=True):
        self.request = request
        self.is_cacheable = is_cacheable

    @request_cache('v1')
    def v1(self):
        return uuid.uuid4().hex

    @request_cache('v2')
    def v2(self):
        return uuid.uuid4().hex


BLANK = '__blank__'


def _make_request(path=BLANK, domain=BLANK, user=BLANK):
    request = HttpRequest()
    if domain != BLANK:
        request.domain = domain
    if path != BLANK:
        request.path = path
    if user != BLANK:
        request.couch_user = user
    return request


class ReportCacheTest(TestCase):
    # note: this is pretty tightly coupled with the internals of the cache
    # but this is probably ok since that's what it's designed to test

    domain = 'cache-test'

    def setUp(self):
        create_domain(self.domain)
        self.web_user1 = WebUser.create(self.domain, 'w1', 'secret')
        self.web_user2 = WebUser.create(self.domain, 'w2', 'secret')

    def tearDown(self):
        self.web_user1.delete()
        self.web_user2.delete()

    def testBasicFunctionality(self):
        report = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
                                          self.domain, self.web_user1))
        v1 = report.v1()
        # self.assertEqual(v1, report.v1())
        v2 = report.v2()
        self.assertEqual(v2, report.v2())
        self.assertNotEqual(v1, v2)
        copy = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
                                        self.domain, self.web_user1))
        self.assertEqual(v1, copy.v1())
        self.assertEqual(v2, copy.v2())

    def testNonCacheable(self):
        report = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
                                          self.domain, self.web_user1),
                            is_cacheable=False)
        v1 = report.v1()
        self.assertNotEqual(v1, report.v1())
        self.assertNotEqual(report.v1(), report.v1())

    def testPathSpecific(self):
        report = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
                                          self.domain, self.web_user1))
        v1 = report.v1()
        v2 = report.v2()
        alternate_paths = [
            '/reports/barbar',
            '/reports/foobars',
            '/reports/foobar/baz',
            '/reports/foobar?bip=bop',
        ]
        for path in alternate_paths:
            full_path = '/a/{domain}{path}'.format(domain=self.domain, path=path)
            alternate = MockReport(_make_request(full_path, self.domain, self.web_user1))
            alt_v1 = alternate.v1()
            self.assertEqual(alt_v1, alternate.v1())
            alt_v2 = alternate.v2()
            self.assertEqual(alt_v2, alternate.v2())
            self.assertNotEqual(alt_v1, v1)
            self.assertNotEqual(alt_v2, v2)

    def testDomainSpecific(self):
        path = '/a/{domain}/reports/foobar'.format(domain=self.domain)
        report = MockReport(_make_request(path, self.domain, self.web_user1))
        v1 = report.v1()
        v2 = report.v2()
        alternate_domains = [
            'cache',
            'cachetest',
            'cache-testy',
            None,
            BLANK,
        ]
        for dom in alternate_domains:
            alternate = MockReport(_make_request(path, dom, self.web_user1))
            alt_v1 = alternate.v1()
            # since this is invalid, this shouldn't even be caching itself
            self.assertNotEqual(alt_v1, alternate.v1())
            alt_v2 = alternate.v2()
            self.assertNotEqual(alt_v2, alternate.v2())
            self.assertNotEqual(alt_v1, v1)
            self.assertNotEqual(alt_v2, v2)

    def testUserSpecific(self):
        path = '/a/{domain}/reports/foobar'.format(domain=self.domain)
        report = MockReport(_make_request(path, self.domain, self.web_user1))
        v1 = report.v1()
        v2 = report.v2()
        alternate = MockReport(_make_request(path, self.domain, self.web_user2))
        alt_v1 = alternate.v1()
        self.assertEqual(alt_v1, alternate.v1())
        alt_v2 = alternate.v2()
        self.assertEqual(alt_v2, alternate.v2())
        self.assertNotEqual(alt_v1, v1)
        self.assertNotEqual(alt_v2, v2)

        # invalid users shouldn't even be caching themselves
        for invalid in ['not a user object', None, BLANK]:
            alternate = MockReport(_make_request(path, self.domain, invalid))
            alt_v1 = alternate.v1()
            # since this is invalid, this shouldn't even be caching itself
            self.assertNotEqual(alt_v1, alternate.v1())
            alt_v2 = alternate.v2()
            self.assertNotEqual(alt_v2, alternate.v2())
            self.assertNotEqual(alt_v1, v1)
            self.assertNotEqual(alt_v2, v2)
license: bsd-3-clause
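The caching contract these tests pin down, restated as a short sketch; it only runs inside the commcare-hq test environment, and `web_user` is assumed to be a valid WebUser as created in setUp:

request = _make_request('/a/cache-test/reports/foobar', 'cache-test', web_user)
report = MockReport(request)
first = report.v1()
assert first == report.v1()        # same report instance: cache hit

twin = MockReport(_make_request('/a/cache-test/reports/foobar',
                                'cache-test', web_user))
assert first == twin.v1()          # same path/domain/user: still a hit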
repo_name: pwarren/AGDeviceControl
path: agdevicecontrol/thirdparty/site-packages/linux2/twisted/trial/reporter.py
copies: 3
size: 15233
content:
# -*- test-case-name: twisted.trial.test.test_trial -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Author: Jonathan D. Simms <[email protected]>
# Original Author: Jonathan Lange <[email protected]>

from __future__ import generators

import sys, types
import warnings

from twisted.python import reflect, failure, log
from twisted.python.compat import adict
from twisted.internet import defer
from twisted.trial import itrial, util
import zope.interface as zi

#******************************************************************************
# turn this off if you're having trouble with traceback printouts or some such
HIDE_TRIAL_INTERNALS = True
#******************************************************************************

# test results, passed as resultType to Reporter.endTest()
STATUSES = (SKIP, EXPECTED_FAILURE, FAILURE,
            ERROR, UNEXPECTED_SUCCESS, SUCCESS) = ("skips", "expectedFailures",
                                                   "failures", "errors",
                                                   "unexpectedSuccesses",
                                                   "successes")

WORDS = {SKIP: '[SKIPPED]',
         EXPECTED_FAILURE: '[TODO]',
         FAILURE: '[FAIL]',
         ERROR: '[ERROR]',
         UNEXPECTED_SUCCESS: '[SUCCESS!?!]',
         SUCCESS: '[OK]'}

LETTERS = {SKIP: 'S', EXPECTED_FAILURE: 'T', FAILURE: 'F',
           ERROR: 'E', UNEXPECTED_SUCCESS: '!', SUCCESS: '.'}

SEPARATOR = '-' * 79
DOUBLE_SEPARATOR = '=' * 79

_basefmt = "caught exception in %s, your TestCase is broken\n\n"
SET_UP_CLASS_WARN = _basefmt % 'setUpClass'
SET_UP_WARN = _basefmt % 'setUp'
TEAR_DOWN_WARN = _basefmt % 'tearDown'
TEAR_DOWN_CLASS_WARN = _basefmt % 'tearDownClass'
DIRTY_REACTOR_POLICY_WARN = "This failure will cause all methods in your class to be reported as ERRORs in the summary"
UNCLEAN_REACTOR_WARN = "REACTOR UNCLEAN! traceback(s) follow: "

PASSED, FAILED = "PASSED", "FAILED"

methNameWarnMsg = adict(setUpClass = SET_UP_CLASS_WARN,
                        setUp = SET_UP_WARN,
                        tearDown = TEAR_DOWN_WARN,
                        tearDownClass = TEAR_DOWN_CLASS_WARN)

# ----------------------------------------------------------------------------

def makeLoggingMethod(name, f):
    def loggingMethod(*a, **kw):
        print "%s.%s(*%r, **%r)" % (name, f.func_name, a, kw)
        return f(*a, **kw)
    return loggingMethod


class MethodCallLoggingType(type):
    def __new__(cls, name, bases, attrs):
        for (k, v) in attrs.items():
            if isinstance(v, types.FunctionType):
                attrs[k] = makeLoggingMethod(name, v)
        return super(MethodCallLoggingType, cls).__new__(cls, name, bases, attrs)


class TestStatsBase(object):
    zi.implements(itrial.ITestStats)
    importErrors = None

    def __init__(self, original):
        #print "original: %r" % (original,)
        self.original = original

    def _collect(self):
        raise NotImplementedError, "should be overridden in subclasses"

    def get_skips(self):
        return self._collect(SKIP)

    def get_errors(self):
        return self._collect(ERROR)

    def get_failures(self):
        return self._collect(FAILURE)

    def get_expectedFailures(self):
        return self._collect(EXPECTED_FAILURE)

    def get_unexpectedSuccesses(self):
        return self._collect(UNEXPECTED_SUCCESS)

    def get_successes(self):
        return self._collect(SUCCESS)

    def runningTime(self):
        o = self.original
        return o.endTime - o.startTime
    runningTime = property(runningTime)


class TestStats(TestStatsBase):
    # this adapter is used for both TestSuite and TestModule objects
    importErrors = property(lambda self: getattr(self.original,
                                                 'couldNotImport', {}).items())

    def _collect(self, status):
        meths = []
        for r in self.original.children:
            meths.extend(r.methodsWithStatus.get(status, []))
        return meths

    def numTests(self):
        n = 0
        for r in self.original.children:
            ts = itrial.ITestStats(r)
            n += ts.numTests()
        return n

    def allPassed(self):
        for r in self.original.children:
            if not itrial.ITestStats(r).allPassed:
                return False
        if getattr(self.original, 'couldNotImport', False):
            return False
        return True
    allPassed = property(allPassed)


class TestCaseStats(TestStatsBase):
    def _collect(self, status):
        """return a list of all TestMethods with status"""
        return self.original.methodsWithStatus.get(status, [])

    def numTests(self):
        n = len(self.original.children)
        return n

    def allPassed(self):
        for status in (ERROR, FAILURE):
            if status in self.original.methodsWithStatus:
                return False
        return True
    allPassed = property(allPassed)


class DocTestRunnerStats(TestCaseStats):
    def numTests(self):
        """DocTestRunners are singleton runners"""
        return 1


class BrokenTestCaseWarning(Warning):
    """emitted as a warning when an exception occurs in one of
    setUp, tearDown, setUpClass, or tearDownClass"""


class Reporter(object):
    zi.implements(itrial.IReporter)
    debugger = None

    def __init__(self, stream=sys.stdout, tbformat='default', args=None,
                 realtime=False):
        self.stream = stream
        self.tbformat = tbformat
        self.args = args
        self.realtime = realtime
        super(Reporter, self).__init__(stream, tbformat, args, realtime)

    def setUpReporter(self):
        return defer.succeed(None)

    def tearDownReporter(self):
        return defer.succeed(None)

    def startTest(self, method):
        pass

    def reportImportError(self, name, exc):
        pass

    def write(self, format, *args):
        s = str(format)
        assert isinstance(s, type(''))
        if args:
            self.stream.write(s % args)
        else:
            self.stream.write(s)
        self.stream.flush()

    def startModule(self, name):
        pass

    def startClass(self, klass):
        pass

    def endModule(self, module):
        pass

    def endClass(self, klass):
        pass

    def emitWarning(self, message, category=UserWarning, stacklevel=0):
        warnings.warn(message, category, stacklevel - 1)

    def upDownError(self, userMeth, warn=True, printStatus=True):
        if warn:
            minfo = itrial.IMethodInfo(userMeth)
            tbStr = '\n'.join([e.getTraceback() for e in userMeth.errors])
            # if not e.check(unittest.SkipTest)])
            log.msg(tbStr)
            msg = "%s%s" % (methNameWarnMsg[minfo.name], tbStr)
            warnings.warn(msg, BrokenTestCaseWarning, stacklevel=2)

    def cleanupErrors(self, errs):
        warnings.warn("%s\n%s" % (UNCLEAN_REACTOR_WARN,
                                  '\n'.join(map(self._formatFailureTraceback, errs))),
                      BrokenTestCaseWarning)

    def endTest(self, method):
        method = itrial.ITestMethod(method)
        if self.realtime:
            for err in method.errors + method.failures:
                err.printTraceback(self.stream)

    def _formatFailureTraceback(self, fail):
        # Short term hack
        if isinstance(fail, str):
            return fail
        detailLevel = self.tbformat
        result = fail.getTraceback(detail=detailLevel, elideFrameworkCode=True)
        if detailLevel == 'default':
            # Apparently trial's tests don't like the 'Traceback:' line.
            result = '\n'.join(result.split('\n')[1:])
        return result

    def _formatImportError(self, name, error):
        """format an import error for report in the summary section of output

        @param name: The name of the module which could not be imported
        @param error: The exception which occurred on import

        @rtype: str
        """
        ret = [DOUBLE_SEPARATOR, '\nIMPORT ERROR:\n\n']
        if isinstance(error, failure.Failure):
            what = self._formatFailureTraceback(error)
        elif type(error) == types.TupleType:
            what = error.args[0]
        else:
            what = "%s\n" % error
        ret.append("Could not import %s: \n%s\n" % (name, what))
        return ''.join(ret)

    def _formatFailedTest(self, name, status, failures, skipMsg=None, todoMsg=None):
        ret = [DOUBLE_SEPARATOR, '%s: %s\n' % (WORDS[status], name)]
        if skipMsg:
            ret.append(self._formatFailureTraceback(skipMsg) + '\n')
        if todoMsg:
            ret.append(todoMsg + '\n')

        if status not in (SUCCESS, SKIP, UNEXPECTED_SUCCESS):
            ret.extend(map(self._formatFailureTraceback, failures))

        return '\n'.join(ret)

    def _reportStatus(self, tsuite):
        tstats = itrial.ITestStats(tsuite)
        summaries = []
        for stat in STATUSES:
            num = len(getattr(tstats, "get_%s" % stat)())
            if num:
                summaries.append('%s=%d' % (stat, num))

        summary = (summaries and ' (' + ', '.join(summaries) + ')') or ''

        if tstats.get_failures() or tstats.get_errors():
            status = FAILED
        else:
            status = PASSED
        self.write("%s%s\n", status, summary)

    def _reportFailures(self, tstats):
        for meth in getattr(tstats, "get_%s" % SKIP)():
            self.write(self._formatFailedTest(
                meth.fullName, meth.status,
                meth.errors + meth.failures,
                meth.skip,
                itrial.ITodo(meth.todo).msg))

        for status in [EXPECTED_FAILURE, FAILURE, ERROR]:
            for meth in getattr(tstats, "get_%s" % status)():
                if meth.hasTbs:
                    self.write(self._formatFailedTest(
                        meth.fullName, meth.status,
                        meth.errors + meth.failures,
                        meth.skip,
                        itrial.ITodo(meth.todo).msg))

        for name, error in tstats.importErrors:
            self.write(self._formatImportError(name, error))

    def endSuite(self, suite):
        tstats = itrial.ITestStats(suite)
        self.write("\n")
        self._reportFailures(tstats)
        self.write("%s\n" % SEPARATOR)
        self.write('Ran %d tests in %.3fs\n', tstats.numTests(),
                   tstats.runningTime)
        self.write('\n')
        self._reportStatus(suite)


class MinimalReporter(Reporter):
    def endSuite(self, suite):
        tstats = itrial.ITestStats(suite)
        t = (tstats.runningTime, tstats.numTests(), tstats.numTests(),
             # XXX: expectedTests == runTests
             len(tstats.importErrors), len(tstats.get_errors()),
             len(tstats.get_failures()), len(tstats.get_skips()))
        self.stream.write(' '.join(map(str, t)) + '\n')


class TextReporter(Reporter):
    def __init__(self, stream=sys.stdout, tbformat='default', args=None,
                 realtime=False):
        super(TextReporter, self).__init__(stream, tbformat, args, realtime)
        self.seenModules, self.seenClasses = {}, {}

    def endTest(self, method):
        self.write(LETTERS.get(itrial.ITestMethod(method).status, '?'))
        super(TextReporter, self).endTest(method)


class VerboseTextReporter(TextReporter):
    # This is actually the bwverbose option
    def startTest(self, method):
        tm = itrial.ITestMethod(method)
        # XXX this is a crap workaround for doctests,
        # there should be a better solution.
        try:
            klass = reflect.qual(tm.klass)
        except AttributeError:
            # not a real class
            klass = str(tm.klass)
        self.write('%s (%s) ... ', tm.name, klass)
        super(VerboseTextReporter, self).startTest(method)

    def endTest(self, method):
        self.write("%s\n" % WORDS.get(itrial.ITestMethod(method).status,
                                      "[??]"))


class TimingTextReporter(VerboseTextReporter):
    def endTest(self, method):
        self.write("%s" % WORDS.get(method.status, "[??]") + " "
                   + "(%.03f secs)\n" % method.runningTime())


class TreeReporter(VerboseTextReporter):
    #__metaclass__ = MethodCallLoggingType
    currentLine = ''
    columns = 79

    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37

    def __init__(self, stream=sys.stdout, tbformat='default', args=None,
                 realtime=False):
        super(TreeReporter, self).__init__(stream, tbformat, args, realtime)
        self.words = {SKIP: ('[SKIPPED]', self.BLUE),
                      EXPECTED_FAILURE: ('[TODO]', self.BLUE),
                      FAILURE: ('[FAIL]', self.RED),
                      ERROR: ('[ERROR]', self.RED),
                      UNEXPECTED_SUCCESS: ('[SUCCESS!?!]', self.RED),
                      SUCCESS: ('[OK]', self.GREEN)}

    def _getText(self, status):
        return self.words.get(status, ('[??]', self.BLUE))

    def write(self, format, *args):
        if args:
            format = format % args
        self.currentLine = format
        super(TreeReporter, self).write(self.currentLine)

    def startModule(self, module):
        modName = module.__name__
        if modName not in self.seenModules:
            self.seenModules[modName] = 1
            self.write('  %s\n' % modName)

    def startClass(self, klass):
        clsName = klass.__name__
        qualifiedClsName = reflect.qual(klass)
        if qualifiedClsName not in self.seenClasses:
            self.seenClasses[qualifiedClsName] = 1
            self.write('    %s\n' % clsName)

    def cleanupErrors(self, errs):
        self.write(self.color('    cleanup errors', self.RED))
        self.endLine(*self._getText(ERROR))
        super(TreeReporter, self).cleanupErrors(errs)

    def upDownError(self, method, warn=True, printStatus=True):
        m = itrial.IMethodInfo(method)
        self.write(self.color("  %s" % m.name, self.RED))
        if printStatus:
            self.endLine(*self._getText(ERROR))
        super(TreeReporter, self).upDownError(method, warn, printStatus)

    def startTest(self, method):
        tm = itrial.ITestMethod(method)
        if tm.docstr:
            # inspect trims whitespace on the left; the lstrip here is
            # for those odd folks who start docstrings with a blank line.
            what = tm.docstr.lstrip().split('\n', 1)[0]
        else:
            what = tm.name
        self.write('      %s ... ', what)

    def endTest(self, method):
        Reporter.endTest(self, method)
        tm = itrial.ITestMethod(method)
        self.endLine(*self._getText(tm.status))

    def color(self, text, color):
        return '%s%s;1m%s%s0m' % ('\x1b[', color, text, '\x1b[')

    def endLine(self, message, color):
        spaces = ' ' * (self.columns - len(self.currentLine) - len(message))
        super(TreeReporter, self).write(spaces)
        super(TreeReporter, self).write("%s\n" % (self.color(message, color),))
license: gpl-2.0
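The escape-sequence trick behind TreeReporter.color(), shown standalone (Python 2 to match the module): the format interpolates into '\x1b[31;1m...\x1b[0m', which renders bright red on ANSI terminals and then resets the attributes.

RED = 31

def color(text, color_code):
    # '\x1b[' starts the escape sequence; ';1m' selects bold/bright,
    # and '\x1b[0m' resets the terminal attributes afterwards.
    return '%s%s;1m%s%s0m' % ('\x1b[', color_code, text, '\x1b[')

print color('[FAIL]', RED)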
repo_name: eebssk1/CAF_MSM_Kernel_msm8916_64
path: tools/perf/scripts/python/sctop.py
copies: 11180
size: 1924
content:
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall.  If a [comm] arg is specified, only syscalls called by
# [comm] are displayed.  If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds.  The default interval is
# 3 seconds.

import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()

def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass

def raw_syscalls__sys_enter(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid,
                            common_comm, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def print_syscall_totals(interval):
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s  %10s\n" % ("event", "count"),
        print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),

        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s  %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
license: gpl-2.0
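The counting idiom from raw_syscalls__sys_enter, restated standalone: perf's autodict turns a missing key into a nested dict, so the failed `+=` surfaces as a TypeError there; with a plain dict the equivalent miss is a KeyError.

syscalls = {}
for syscall_id in [0, 1, 0]:
    try:
        syscalls[syscall_id] += 1
    except KeyError:           # autodict in the script raises TypeError here
        syscalls[syscall_id] = 1
assert syscalls == {0: 2, 1: 1}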
repo_name: frreiss/tensorflow-fred
path: tensorflow/compiler/tests/matrix_triangular_solve_op_test.py
copies: 14
size: 7183
content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.MatrixTriangularSolve."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np

from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test


def MakePlaceholder(x):
  return array_ops.placeholder(dtypes.as_dtype(x.dtype), shape=x.shape)


class MatrixTriangularSolveOpTest(xla_test.XLATestCase):

  # MatrixTriangularSolve defined for float64, float32, complex64, complex128
  # (https://www.tensorflow.org/api_docs/python/tf/matrix_triangular_solve)
  @property
  def float_types(self):
    return set(super(MatrixTriangularSolveOpTest,
                     self).float_types).intersection(
                         (np.float64, np.float32, np.complex64, np.complex128))

  def _VerifyTriangularSolveBase(self, sess, placeholder_a, placeholder_ca,
                                 placeholder_b, a, clean_a, b, verification,
                                 atol):
    feed_dict = {placeholder_a: a, placeholder_ca: clean_a, placeholder_b: b}
    verification_np = sess.run(verification, feed_dict)
    broadcasted_shape = a.shape[:-2] + (b.shape[-2], b.shape[-1])
    broadcasted_b = b + np.zeros(shape=broadcasted_shape, dtype=b.dtype)
    self.assertAllClose(broadcasted_b, verification_np, atol=atol)

  def _VerifyTriangularSolve(self, a, b, lower, adjoint, atol):
    clean_a = np.tril(a) if lower else np.triu(a)
    with self.session() as sess:
      placeholder_a = MakePlaceholder(a)
      placeholder_ca = MakePlaceholder(clean_a)
      placeholder_b = MakePlaceholder(b)
      with self.test_scope():
        x = linalg_ops.matrix_triangular_solve(
            placeholder_a, placeholder_b, lower=lower, adjoint=adjoint)
      verification = test_util.matmul_without_tf32(
          placeholder_ca, x, adjoint_a=adjoint)
      self._VerifyTriangularSolveBase(sess, placeholder_a, placeholder_ca,
                                      placeholder_b, a, clean_a, b,
                                      verification, atol)

  def _VerifyTriangularSolveCombo(self, a, b, atol=1e-4):
    transp = lambda x: np.swapaxes(x, -1, -2)
    for lower, adjoint in itertools.product([True, False], repeat=2):
      self._VerifyTriangularSolve(
          a if lower else transp(a), b, lower, adjoint, atol)

  def testBasic(self):
    rng = np.random.RandomState(0)
    a = np.tril(rng.randn(5, 5))
    b = rng.randn(5, 7)
    for dtype in self.float_types:
      self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))

  def testBasicNotActuallyTriangular(self):
    rng = np.random.RandomState(0)
    a = rng.randn(5, 5)  # the `a` matrix is not lower-triangular
    b = rng.randn(5, 7)
    for dtype in self.float_types:
      self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))

  def testBasicComplexDtypes(self):
    if xla_test.test.is_built_with_rocm():
      # The following subtest invokes the call to "BlasTrsm"
      # That operation is currently not supported on the ROCm platform
      self.skipTest("BlasTrsm op for complex types is not supported in ROCm")

    rng = np.random.RandomState(0)
    a = np.tril(rng.randn(5, 5) + rng.randn(5, 5) * 1j)
    b = rng.randn(5, 7) + rng.randn(5, 7) * 1j
    for dtype in self.complex_types:
      self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))

  def testBatch(self):
    rng = np.random.RandomState(0)
    shapes = [((4, 3, 3), (4, 3, 5)), ((1, 2, 2), (1, 2, 1)),
              ((1, 1, 1), (1, 1, 2)), ((2, 3, 4, 4), (2, 3, 4, 1))]
    tuples = itertools.product(self.float_types, shapes)
    for dtype, (a_shape, b_shape) in tuples:
      n = a_shape[-1]
      a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
      b = rng.randn(*b_shape)
      self._VerifyTriangularSolveCombo(
          a.astype(dtype), b.astype(dtype), atol=1e-3)

  def testBatchBroadcast(self):
    rng = np.random.RandomState(0)
    shapes = [((3, 3), (4, 3, 5)), ((1, 2, 2), (3, 2, 1)),
              ((1, 1), (1, 1, 2)), ((1, 3, 4, 4), (2, 1, 4, 1))]
    tuples = itertools.product(self.float_types, shapes)
    for dtype, (a_shape, b_shape) in tuples:
      n = a_shape[-1]
      a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
      b = rng.randn(*b_shape)
      self._VerifyTriangularSolveCombo(
          a.astype(dtype), b.astype(dtype), atol=1e-3)

  def testLarge(self):
    n = 1024
    rng = np.random.RandomState(0)
    a = np.tril(rng.rand(n, n) - 0.5) / (2.0 * n) + np.eye(n)
    b = rng.randn(n, n)
    self._VerifyTriangularSolve(
        a.astype(np.float32), b.astype(np.float32), True, False, 1e-4)

  @test_util.disable_mlir_bridge("Error handling")
  def testNonSquareCoefficientMatrix(self):
    rng = np.random.RandomState(0)
    for dtype in self.float_types:
      a = rng.randn(3, 4).astype(dtype)
      b = rng.randn(4, 4).astype(dtype)
      with self.test_scope():
        with self.assertRaises((ValueError, errors.InvalidArgumentError)):
          linalg_ops.matrix_triangular_solve(a, b)

  @test_util.run_v2_only  # Different error types
  @test_util.disable_mlir_bridge("Error handling")
  def testWrongDimensionsV2(self):
    randn = np.random.RandomState(0).randn
    for dtype in self.float_types:
      lhs = constant_op.constant(randn(3, 3), dtype=dtype)
      rhs = constant_op.constant(randn(4, 3), dtype=dtype)
      with self.assertRaises(errors.InvalidArgumentError):
        linalg_ops.matrix_triangular_solve(lhs, rhs)
      with self.assertRaises(errors.InvalidArgumentError):
        linalg_ops.matrix_triangular_solve(lhs, rhs)

  @test_util.run_v1_only("Different error types")
  @test_util.disable_mlir_bridge("Error handling")
  def testWrongDimensionsV1(self):
    randn = np.random.RandomState(0).randn
    for dtype in self.float_types:
      lhs = constant_op.constant(randn(3, 3), dtype=dtype)
      rhs = constant_op.constant(randn(4, 3), dtype=dtype)
      with self.assertRaises(ValueError):
        linalg_ops.matrix_triangular_solve(lhs, rhs)
      with self.assertRaises(ValueError):
        linalg_ops.matrix_triangular_solve(lhs, rhs)


if __name__ == "__main__":
  test.main()
license: apache-2.0
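What _VerifyTriangularSolveBase checks, restated in plain NumPy: solve A x = b for a triangular A, then confirm that multiplying back reproduces b. The shapes and conditioning trick here are illustrative, not taken from the test:

import numpy as np

rng = np.random.RandomState(0)
a = np.tril(rng.randn(4, 4)) + 4.0 * np.eye(4)   # well-conditioned lower-triangular
b = rng.randn(4, 2)
x = np.linalg.solve(a, b)
assert np.allclose(a.dot(x), b, atol=1e-4)       # the round-trip the test verifies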
repo_name: awatts/boto
path: boto/sdb/db/model.py
copies: 152
size: 10158
content:
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.sdb.db.property import Property from boto.sdb.db.key import Key from boto.sdb.db.query import Query import boto from boto.compat import filter class ModelMeta(type): "Metaclass for all Models" def __init__(cls, name, bases, dict): super(ModelMeta, cls).__init__(name, bases, dict) # Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!) cls.__sub_classes__ = [] # Do a delayed import to prevent possible circular import errors. from boto.sdb.db.manager import get_manager try: if filter(lambda b: issubclass(b, Model), bases): for base in bases: base.__sub_classes__.append(cls) cls._manager = get_manager(cls) # look for all of the Properties and set their names for key in dict.keys(): if isinstance(dict[key], Property): property = dict[key] property.__property_config__(cls, key) prop_names = [] props = cls.properties() for prop in props: if not prop.__class__.__name__.startswith('_'): prop_names.append(prop.name) setattr(cls, '_prop_names', prop_names) except NameError: # 'Model' isn't defined yet, meaning we're looking at our own # Model class, defined below. 
pass class Model(object): __metaclass__ = ModelMeta __consistent__ = False # Consistent is set off by default id = None @classmethod def get_lineage(cls): l = [c.__name__ for c in cls.mro()] l.reverse() return '.'.join(l) @classmethod def kind(cls): return cls.__name__ @classmethod def _get_by_id(cls, id, manager=None): if not manager: manager = cls._manager return manager.get_object(cls, id) @classmethod def get_by_id(cls, ids=None, parent=None): if isinstance(ids, list): objs = [cls._get_by_id(id) for id in ids] return objs else: return cls._get_by_id(ids) get_by_ids = get_by_id @classmethod def get_by_key_name(cls, key_names, parent=None): raise NotImplementedError("Key Names are not currently supported") @classmethod def find(cls, limit=None, next_token=None, **params): q = Query(cls, limit=limit, next_token=next_token) for key, value in params.items(): q.filter('%s =' % key, value) return q @classmethod def all(cls, limit=None, next_token=None): return cls.find(limit=limit, next_token=next_token) @classmethod def get_or_insert(key_name, **kw): raise NotImplementedError("get_or_insert not currently supported") @classmethod def properties(cls, hidden=True): properties = [] while cls: for key in cls.__dict__.keys(): prop = cls.__dict__[key] if isinstance(prop, Property): if hidden or not prop.__class__.__name__.startswith('_'): properties.append(prop) if len(cls.__bases__) > 0: cls = cls.__bases__[0] else: cls = None return properties @classmethod def find_property(cls, prop_name): property = None while cls: for key in cls.__dict__.keys(): prop = cls.__dict__[key] if isinstance(prop, Property): if not prop.__class__.__name__.startswith('_') and prop_name == prop.name: property = prop if len(cls.__bases__) > 0: cls = cls.__bases__[0] else: cls = None return property @classmethod def get_xmlmanager(cls): if not hasattr(cls, '_xmlmanager'): from boto.sdb.db.manager.xmlmanager import XMLManager cls._xmlmanager = XMLManager(cls, None, None, None, None, None, None, None, False) return cls._xmlmanager @classmethod def from_xml(cls, fp): xmlmanager = cls.get_xmlmanager() return xmlmanager.unmarshal_object(fp) def __init__(self, id=None, **kw): self._loaded = False # first try to initialize all properties to their default values for prop in self.properties(hidden=False): try: setattr(self, prop.name, prop.default_value()) except ValueError: pass if 'manager' in kw: self._manager = kw['manager'] self.id = id for key in kw: if key != 'manager': # We don't want any errors populating up when loading an object, # so if it fails we just revert to it's default value try: setattr(self, key, kw[key]) except Exception as e: boto.log.exception(e) def __repr__(self): return '%s<%s>' % (self.__class__.__name__, self.id) def __str__(self): return str(self.id) def __eq__(self, other): return other and isinstance(other, Model) and self.id == other.id def _get_raw_item(self): return self._manager.get_raw_item(self) def load(self): if self.id and not self._loaded: self._manager.load_object(self) def reload(self): if self.id: self._loaded = False self._manager.load_object(self) def put(self, expected_value=None): """ Save this object as it is, with an optional expected value :param expected_value: Optional tuple of Attribute, and Value that must be the same in order to save this object. If this condition is not met, an SDBResponseError will be raised with a Confict status code. 
:type expected_value: tuple or list :return: This object :rtype: :class:`boto.sdb.db.model.Model` """ self._manager.save_object(self, expected_value) return self save = put def put_attributes(self, attrs): """ Save just these few attributes, not the whole object :param attrs: Attributes to save, key->value dict :type attrs: dict :return: self :rtype: :class:`boto.sdb.db.model.Model` """ assert(isinstance(attrs, dict)), "Argument must be a dict of key->values to save" for prop_name in attrs: value = attrs[prop_name] prop = self.find_property(prop_name) assert(prop), "Property not found: %s" % prop_name self._manager.set_property(prop, self, prop_name, value) self.reload() return self def delete_attributes(self, attrs): """ Delete just these attributes, not the whole object. :param attrs: Attributes to delete, as a list of string names :type attrs: list :return: self :rtype: :class:`boto.sdb.db.model.Model` """ assert(isinstance(attrs, list)), "Argument must be a list of names of keys to delete." self._manager.domain.delete_attributes(self.id, attrs) self.reload() return self save_attributes = put_attributes def delete(self): self._manager.delete_object(self) def key(self): return Key(obj=self) def set_manager(self, manager): self._manager = manager def to_dict(self): props = {} for prop in self.properties(hidden=False): props[prop.name] = getattr(self, prop.name) obj = {'properties': props, 'id': self.id} return {self.__class__.__name__: obj} def to_xml(self, doc=None): xmlmanager = self.get_xmlmanager() doc = xmlmanager.marshal_object(self, doc) return doc @classmethod def find_subclass(cls, name): """Find a subclass with a given name""" if name == cls.__name__: return cls for sc in cls.__sub_classes__: r = sc.find_subclass(name) if r is not None: return r class Expando(Model): def __setattr__(self, name, value): if name in self._prop_names: object.__setattr__(self, name, value) elif name.startswith('_'): object.__setattr__(self, name, value) elif name == 'id': object.__setattr__(self, name, value) else: self._manager.set_key_value(self, name, value) object.__setattr__(self, name, value) def __getattr__(self, name): if not name.startswith('_'): value = self._manager.get_key_value(self, name) if value: object.__setattr__(self, name, value) return value raise AttributeError
mit
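# Editor's note: illustrative usage sketch, not part of the original file.
# A minimal Model subclass and round-trip, assuming boto is configured with
# SimpleDB credentials; the 'Note' model and its fields are hypothetical.
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty, IntegerProperty

class Note(Model):
    title = StringProperty()
    priority = IntegerProperty(default=0)

note = Note()
note.title = 'remember the milk'
note.put()                            # persisted through the class's manager
for n in Note.find(title='remember the milk'):
    print n.id, n.priority            # Python 2 print, matching the source above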
guewen/odoo
addons/account/project/wizard/account_analytic_journal_report.py
378
3164
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv class account_analytic_journal_report(osv.osv_memory): _name = 'account.analytic.journal.report' _description = 'Account Analytic Journal' _columns = { 'date1': fields.date('Start of period', required=True), 'date2': fields.date('End of period', required=True), 'analytic_account_journal_id': fields.many2many('account.analytic.journal', 'account_analytic_journal_name', 'journal_line_id', 'journal_print_id', 'Analytic Journals', required=True), } _defaults = { 'date1': lambda *a: time.strftime('%Y-01-01'), 'date2': lambda *a: time.strftime('%Y-%m-%d') } def check_report(self, cr, uid, ids, context=None): if context is None: context = {} data = self.read(cr, uid, ids)[0] ids_list = [] if context.get('active_id',False): ids_list.append(context.get('active_id',False)) else: record = self.browse(cr,uid,ids[0],context=context) for analytic_record in record.analytic_account_journal_id: ids_list.append(analytic_record.id) datas = { 'ids': ids_list, 'model': 'account.analytic.journal', 'form': data } context2 = context.copy() context2['active_model'] = 'account.analytic.journal' context2['active_ids'] = ids_list return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticjournal', data=datas, context=context2) def default_get(self, cr, uid, fields, context=None): if context is None: context = {} res = super(account_analytic_journal_report, self).default_get(cr, uid, fields, context=context) if not context.has_key('active_ids'): journal_ids = self.pool.get('account.analytic.journal').search(cr, uid, [], context=context) else: journal_ids = context.get('active_ids') if 'analytic_account_journal_id' in fields: res.update({'analytic_account_journal_id': journal_ids}) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
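# Editor's note: illustrative sketch, not part of the original file. Driving
# the wizard above from server-side code with the same OpenERP 7-style API;
# assumes this runs inside a model method where cr, uid, context and a valid
# journal_id are available.
wiz_obj = self.pool.get('account.analytic.journal.report')
wiz_id = wiz_obj.create(cr, uid, {
    'date1': '2014-01-01',
    'date2': '2014-12-31',
    'analytic_account_journal_id': [(6, 0, [journal_id])],  # set the many2many
}, context=context)
action = wiz_obj.check_report(cr, uid, [wiz_id], context=context)  # report action dict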
3dfxsoftware/cbss-addons
vauxoo_reports/sale_order_report/report/amd_computadoras_sale_report.py
1
1740
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # [email protected] # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from report import report_sxw from osv import osv from tools.translate import _ from report import pyPdf class amd_computadoras_sale(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(amd_computadoras_sale, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, 'hello': self._hello, }) def _hello(self, p): print "inside _hello" output = pyPdf.PdfFileWriter() print output return "Hello World %s" % output report_sxw.report_sxw( 'report.sale_m321_c_report', 'sale.order', 'addons/sale_order_report/report/amd_computadoras_sale_report.rml', parser=amd_computadoras_sale)
gpl-2.0
webu/django-cms
cms/test_utils/project/emailuserapp/forms.py
61
3574
# -*- coding: utf-8 -*- from django import forms from django.contrib.auth import get_user_model from django.contrib.auth.forms import ReadOnlyPasswordHashField from .models import EmailUser class UserCreationForm(forms.ModelForm): """ A form for creating a new user, including the required email and password fields. """ error_messages = { 'duplicate_email': "A user with that email already exists.", 'password_mismatch': "The two password fields didn't match.", } email = forms.EmailField( label='Email', help_text="Required. Standard format email address.", ) password1 = forms.CharField( label='Password', widget=forms.PasswordInput ) password2 = forms.CharField( label='Password confirmation', widget=forms.PasswordInput, help_text="Enter the same password as above, for verification." ) class Meta: model = EmailUser fields = ('email',) def clean_email(self): # Since EmailUser.email is unique, this check is redundant, # but it sets a nicer error message than the ORM. See #13147. email = self.cleaned_data["email"] User = get_user_model() try: User._default_manager.get(email=email) except User.DoesNotExist: return email raise forms.ValidationError( self.error_messages['duplicate_email'], code='duplicate_email', ) def clean_password2(self): # check that the two passwords match password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', ) return password2 def save(self, commit=True): # Save the provided password in hashed format user = super(UserCreationForm, self).save(commit=False) user.set_password(self.cleaned_data["password1"]) if commit: user.save() return user class UserChangeForm(forms.ModelForm): """ A form for updating users, including all fields on the user, but replaces the password field with admin's password hash display field. """ email = forms.EmailField( label='Email', help_text="Required. Standard format email address.", ) password = ReadOnlyPasswordHashField(label="Password", help_text="Raw passwords are not stored, so there is no way to see " "this user's password, but you can change the password " "using <a href=\"password/\">this form</a>.") class Meta: model = EmailUser fields = ('email', 'password', 'first_name', 'last_name', 'is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions', 'last_login', 'date_joined') def __init__(self, *args, **kwargs): super(UserChangeForm, self).__init__(*args, **kwargs) f = self.fields.get('user_permissions', None) if f is not None: f.queryset = f.queryset.select_related('content_type') def clean_password(self): """ Regardless of what the user provides, return the initial value. This is done here, rather than on the field, because the field does not have access to the initial value. """ return self.initial["password"]
bsd-3-clause
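# Editor's note: illustrative usage sketch, not part of the original file.
# Validating and saving a new user through UserCreationForm; requires a
# configured Django settings module and database, and the field values
# below are hypothetical.
form = UserCreationForm(data={
    'email': 'alice@example.com',
    'password1': 's3cret-pass',
    'password2': 's3cret-pass',
})
if form.is_valid():
    user = form.save()      # hashes the password via set_password()
else:
    print(form.errors)      # e.g. duplicate_email or password_mismatch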
TimofeyFox/GT-S7270_kernel
tools/perf/scripts/python/syscall-counts-by-pid.py
11180
1927
# system call counts, by pid # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts-by-pid.py [comm]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return try: syscalls[common_comm][common_pid][id] += 1 except TypeError: syscalls[common_comm][common_pid][id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events by comm/pid:\n\n", print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id, val in sorted(syscalls[comm][pid].iteritems(), \ key = lambda(k, v): (v, k), reverse = True): print " %-38s %10d\n" % (syscall_name(id), val),
gpl-2.0
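# Editor's note: illustrative invocation sketch, not part of the original
# file; the pid is hypothetical.
#
#   $ perf record -e raw_syscalls:sys_enter -a sleep 5
#   $ perf script -s syscall-counts-by-pid.py 1234
#
# The first command records system-wide sys_enter events for 5 seconds; the
# second replays them through this script and prints syscall counts for
# pid 1234 only (pass a comm name instead to filter by process name).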
Fafou/Sick-Beard
sickbeard/clients/requests/packages/urllib3/contrib/ntlmpool.py
262
4740
# urllib3/contrib/ntlmpool.py # Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ NTLM authenticating pool, contributed by erikcederstran Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ try: from http.client import HTTPSConnection except ImportError: from httplib import HTTPSConnection from logging import getLogger from ntlm import ntlm from urllib3 import HTTPSConnectionPool log = getLogger(__name__) class NTLMConnectionPool(HTTPSConnectionPool): """ Implements an NTLM authentication version of an urllib3 connection pool """ scheme = 'https' def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split('\\', 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' % (self.num_connections, self.host, self.authurl)) headers = {} headers['Connection'] = 'Keep-Alive' req_header = 'Authorization' resp_header = 'www-authenticate' conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message headers[req_header] = ( 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % reshdr) log.debug('Response data: %s [...]' % res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message auth_header_values = reshdr[resp_header].split(', ') auth_header_value = None for s in auth_header_values: if s[:5] == 'NTLM ': auth_header_value = s[5:] if auth_header_value is None: raise Exception('Unexpected %s response header: %s' % (resp_header, reshdr[resp_header])) # Send authentication message ServerChallenge, NegotiateFlags = \ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags) headers[req_header] = 'NTLM %s' % auth_msg log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % dict(res.getheaders())) log.debug('Response data: %s [...]' % res.read()[:100]) if res.status != 200: if res.status == 401: raise Exception('Server rejected request: wrong ' 'username or password') raise Exception('Wrong server response: %s %s' % (res.status, res.reason)) res.fp = None log.debug('Connection established') return conn def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True): if headers is None: headers = {} headers['Connection'] = 'Keep-Alive' return super(NTLMConnectionPool, 
self).urlopen(method, url, body, headers, retries, redirect, assert_same_host)
gpl-3.0
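# Editor's note: illustrative usage sketch, not part of the original file.
# Opening an NTLM-authenticated pool; host, credentials and paths are
# hypothetical, and the ntlm package must be installed.
pool = NTLMConnectionPool(r'EXAMPLE\alice', 's3cret',
                          authurl='/protected/auth',
                          host='intranet.example.com', port=443)
response = pool.urlopen('GET', '/protected/report')
print(response.status)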
dpayne9000/Rubixz-Coin
qa/rpc-tests/test_script_address2.py
1
4079
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test new Rubixzcoin multisig prefix functionality. # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import decimal class ScriptAddress2Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, [])) self.nodes.append(start_node(1, self.options.tmpdir, [])) self.nodes.append(start_node(2, self.options.tmpdir, [])) connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 0) self.is_network_split = False self.sync_all() def run_test(self): cnt = self.nodes[0].getblockcount() # Mine some blocks self.nodes[1].generate(100) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 100): raise AssertionError("Failed to mine 100 blocks") addr = self.nodes[0].getnewaddress() addr2 = self.nodes[0].getnewaddress() multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount") assert_equal(multisig_addr[0], 'Q') # Send to a new multisig address txid = self.nodes[1].sendtoaddress(multisig_addr, 1) block = self.nodes[1].generate(3) self.sync_all() tx = self.nodes[2].getrawtransaction(txid, 1) dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0], tx["vout"][1]['scriptPubKey']['addresses'][0]] assert(multisig_addr in dest_addrs) # Spend from the new multisig address addr3 = self.nodes[1].getnewaddress() txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8) block = self.nodes[0].generate(2) self.sync_all() assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2) assert(self.nodes[1].listtransactions()[-1]['address'] == addr3) # Send to an old multisig address. The api addmultisigaddress # can only generate a new address so we manually compute # multisig_addr_old beforehand using an old client. priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89", "cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"] addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v", "mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"] self.nodes[0].importprivkey(priv_keys[0]) self.nodes[0].importprivkey(priv_keys[1]) multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2") assert_equal(multisig_addr_new, "QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe") multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq" ## Let's send to the old address. We can then find it in the ## new address with the new client. So basically the old ## address and the new one are the same thing. txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1) block = self.nodes[1].generate(1) self.sync_all() tx = self.nodes[2].getrawtransaction(txid, 1) dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0], tx["vout"][1]['scriptPubKey']['addresses'][0]] assert(multisig_addr_new in dest_addrs) assert(multisig_addr_old not in dest_addrs) # Spend from the new multisig address addr4 = self.nodes[1].getnewaddress() txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8) block = self.nodes[0].generate(2) self.sync_all() assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2) assert(self.nodes[1].listtransactions()[-1]['address'] == addr4) if __name__ == '__main__': ScriptAddress2Test().main()
mit
sightmachine/simplecv2-facerecognizer
examples/mustachinator.py
1
1560
#!/usr/bin/python from operator import add from simplecv.core.camera import Camera from simplecv.display import Display from simplecv.factory import Factory from simplecv.features.haar_cascade import HaarCascade # import path assumed; HaarCascade and Image were undefined in the original script cam = Camera() display = Display((800,600)) counter = 0 # load the cascades face_cascade = HaarCascade("face") nose_cascade = HaarCascade("nose") stache = Factory.Image("stache.png", sample=True) # load the stache mask = stache.createAlphaMask() # load the stache mask count = 0 while display.isNotDone(): img = cam.getImage() img = img.scale(.5) #use a smaller image faces = img.find_haar_features(face_cascade) #find faces if( faces is not None ): # if we have a face faces = faces.sort_area() #get the biggest one face = faces[-1] myFace = face.crop() # get the face image noses = myFace.find_haar_features(nose_cascade) #find the nose if( noses is not None ):# if we have a nose noses = noses.sort_area() nose = noses[-1] # get the biggest # these get the upper left corner of the face/nose with respect to original image xf = face.x -(face.get_width()/2) yf = face.y -(face.get_height()/2) xm = nose.x -(nose.get_width()/2) ym = nose.y -(nose.get_height()/2) #calculate the mustache position xmust = xf+xm-(stache.width/2)+(nose.get_width()/2) ymust = yf+ym+(2*nose.get_height()/3) #blit the stache/mask onto the image img = img.blit(stache,pos=(xmust,ymust),mask = mask) img.save(display) #display
bsd-2-clause
UoK-Psychology/Openethics
root/tests/views.py
1
5023
from django.test import TestCase from django.core.urlresolvers import reverse from django.contrib.auth.models import User from mock import patch, call from ethicsapplication.models import EthicsApplication class IndexViewTestCase(TestCase): def setUp(self): self.user = User.objects.create_user('test', '[email protected]', 'testpass') self.user.save() def test_user_not_logged_in(self): ''' If a non logged in user does a get request to the index url they should be sent directly to the index page ''' response = self.client.get(reverse('index_view')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'index.html') #assert context self.assertFalse('active_applications' in response.context) def test_user_is_logged_in_has_active_applications(self): ''' If a user is logged in then the context should include: active_applications: applications that the user is the pi for and are in the state *with researcher* applications_in_review: applications that the user is the pi for and are in the state *awaiting_approval* approved_applications: applications that the user is the pi for and are in the state *approved* these lists are generated using the get_applications_for_principle_investigator function passing in the specific state filter. This function is mocked in this test. ''' ''' Below we mock up get_applications_for_principle_investigator and hardwire it to return what we expect before each call. This means the test can run as a unit test in isolation from EthicsApplicationManager ''' with patch('ethicsapplication.models.EthicsApplicationManager.get_applications_for_principle_investigator') as manager_mock: manager_mock.return_value = [] #set what value we want the call to get_applications_for_principle_investigator() to return below.. #have a user, and be logged in #get request to the index page self.client.login(username='test', password='testpass') response = self.client.get(reverse('index_view')) #the context returned by a call to index_view will include the result #of a call to get_applications_for_principle_investigator. #assert 200 #assert the template self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'index.html') #assert context self.assertEqual(response.context['active_applications'], []) self.assertEqual(response.context['applications_in_review'], []) self.assertEqual(response.context['approved_applications'], []) #assert that manager_mock is called self.assertTrue(call(self.user, 'with_researcher') in manager_mock.mock_calls) self.assertTrue(call(self.user, 'awaiting_approval') in manager_mock.mock_calls) self.assertTrue(call(self.user, 'approved') in manager_mock.mock_calls) def test_user_is_logged_in_has_applications_for_review(self): ''' If the user has got applications that they are the reviewer for, which are in the state *awaiting approval*, then they should be listed in the context as applications_for_review ''' with patch('ethicsapplication.models.EthicsApplicationManager.get_applications_for_reviewer') as manager_mock: application_for_review = EthicsApplication.objects.create(title='test', principle_investigator=self.user) manager_mock.return_value = [application_for_review] #set what value we want the call to get_applications_for_reviewer() to return below.. #have a user, and be logged in #get request to the index page self.client.login(username='test', password='testpass') response = self.client.get(reverse('index_view')) #the context returned by a call to index_view will include the result #of a call to get_applications_for_reviewer.
#assert 200 #assert the template self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'index.html') #assert context self.assertTrue('applications_for_review' in response.context) self.assertEqual(response.context['applications_for_review'], [application_for_review] ) #assert that manager_mock is called manager_mock.assert_called_with(self.user, 'awaiting_approval')
gpl-3.0
fabada/pootle
pootle/apps/pootle_app/project_tree.py
5
16115
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) Pootle contributors. # # This file is a part of the Pootle project. It is distributed under the GPL3 # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. import errno import logging import os import re from django.conf import settings from pootle.core.log import store_log, STORE_RESURRECTED from pootle.core.utils.timezone import datetime_min from pootle_app.models.directory import Directory from pootle_language.models import Language from pootle_store.models import Store from pootle_store.util import absolute_real_path, relative_real_path #: Case insensitive match for language codes LANGCODE_RE = re.compile('^[a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?$', re.IGNORECASE) #: Case insensitive match for language codes as postfix LANGCODE_POSTFIX_RE = re.compile('^.*?[-_.]([a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?)$', re.IGNORECASE) def direct_language_match_filename(language_code, path_name): name, ext = os.path.splitext(os.path.basename(path_name)) if name == language_code or name.lower() == language_code.lower(): return True # Check file doesn't match another language. if Language.objects.filter(code__iexact=name).count(): return False detect = LANGCODE_POSTFIX_RE.split(name) return (len(detect) > 1 and (detect[1] == language_code or detect[1].lower() == language_code.lower())) def match_template_filename(project, filename): """Test if :param:`filename` might point at a template file for a given :param:`project`. """ name, ext = os.path.splitext(os.path.basename(filename)) #FIXME: is the test for matching extension redundant? if ext == os.path.extsep + project.get_template_filetype(): if ext != os.path.extsep + project.localfiletype: # Template extension is distinct, surely file is a template. return True elif not find_lang_postfix(filename): # File name can't possibly match any language, assume it is a # template. return True return False def get_matching_language_dirs(project_dir, language): return [lang_dir for lang_dir in os.listdir(project_dir) if language.code == lang_dir] def get_non_existant_language_dir(project_dir, language, file_style, make_dirs): if file_style == "gnu": return project_dir elif make_dirs: language_dir = os.path.join(project_dir, language.code) os.mkdir(language_dir) return language_dir else: raise IndexError("Directory not found for language %s, project %s" % (language.code, project_dir)) def get_or_make_language_dir(project_dir, language, file_style, make_dirs): matching_language_dirs = get_matching_language_dirs(project_dir, language) if len(matching_language_dirs) == 0: # If no matching directories can be found, check if it is a GNU-style # project. return get_non_existant_language_dir(project_dir, language, file_style, make_dirs) else: return os.path.join(project_dir, matching_language_dirs[0]) def get_language_dir(project_dir, language, file_style, make_dirs): language_dir = os.path.join(project_dir, language.code) if not os.path.exists(language_dir): return get_or_make_language_dir(project_dir, language, file_style, make_dirs) else: return language_dir def get_translation_project_dir(language, project_dir, file_style, make_dirs=False): """Returns the base directory containing translations files for the project. :param make_dirs: if ``True``, project and language directories will be created as necessary. 
""" if file_style == 'gnu': return project_dir else: return get_language_dir(project_dir, language, file_style, make_dirs) def is_hidden_file(path): return path[0] == '.' def split_files_and_dirs(ignored_files, ext, real_dir, file_filter): files = [] dirs = [] for child_path in [child_path for child_path in os.listdir(real_dir) if child_path not in ignored_files and not is_hidden_file(child_path)]: full_child_path = os.path.join(real_dir, child_path) if (os.path.isfile(full_child_path) and full_child_path.endswith(ext) and file_filter(full_child_path)): files.append(child_path) elif os.path.isdir(full_child_path): dirs.append(child_path) return files, dirs def add_items(fs_items_set, db_items, create_or_resurrect_db_item, parent): """Add/make obsolete the database items to correspond to the filesystem. :param fs_items_set: items (dirs, files) currently in the filesystem :param db_items: dict (name, item) of items (dirs, stores) currently in the database :create_or_resurrect_db_item: callable that will create a new db item or resurrect an obsolete db item with a given name and parent. :parent: parent db directory for the items :return: list of all items, list of newly added items :rtype: tuple """ items = [] new_items = [] db_items_set = set(db_items) items_to_delete = db_items_set - fs_items_set items_to_create = fs_items_set - db_items_set for name in items_to_delete: db_items[name].makeobsolete() if len(items_to_delete) > 0: parent.update_all_cache() for vfolder_treeitem in parent.vfolder_treeitems: vfolder_treeitem.update_all_cache() for name in db_items_set - items_to_delete: items.append(db_items[name]) for name in items_to_create: item = create_or_resurrect_db_item(name) items.append(item) new_items.append(item) try: item.save() except Exception: logging.exception('Error while adding %s', item) return items, new_items def create_or_resurrect_store(file, parent, name, translation_project): """Create or resurrect a store db item with given name and parent.""" try: store = Store.objects.get(parent=parent, name=name) store.obsolete = False store.file_mtime = datetime_min if store.last_sync_revision is None: store.last_sync_revision = store.get_max_unit_revision() store_log(user='system', action=STORE_RESURRECTED, path=store.pootle_path, store=store.id) except Store.DoesNotExist: store = Store(file=file, parent=parent, name=name, translation_project=translation_project) store.mark_all_dirty() return store def create_or_resurrect_dir(name, parent): """Create or resurrect a directory db item with given name and parent.""" try: dir = Directory.objects.get(parent=parent, name=name) dir.obsolete = False except Directory.DoesNotExist: dir = Directory(name=name, parent=parent) dir.mark_all_dirty() return dir # TODO: rename function or even rewrite it def add_files(translation_project, ignored_files, ext, relative_dir, db_dir, file_filter=lambda _x: True): podir_path = to_podir_path(relative_dir) files, dirs = split_files_and_dirs(ignored_files, ext, podir_path, file_filter) file_set = set(files) dir_set = set(dirs) existing_stores = dict((store.name, store) for store in db_dir.child_stores.live().exclude(file='') .iterator()) existing_dirs = dict((dir.name, dir) for dir in db_dir.child_dirs.live().iterator()) files, new_files = add_items( file_set, existing_stores, lambda name: create_or_resurrect_store( file=os.path.join(relative_dir, name), parent=db_dir, name=name, translation_project=translation_project, ), db_dir, ) db_subdirs, new_db_subdirs = add_items( dir_set, existing_dirs, lambda 
name: create_or_resurrect_dir(name=name, parent=db_dir), db_dir, ) is_empty = len(files) == 0 for db_subdir in db_subdirs: fs_subdir = os.path.join(relative_dir, db_subdir.name) _files, _new_files, _is_empty = \ add_files(translation_project, ignored_files, ext, fs_subdir, db_subdir, file_filter) files += _files new_files += _new_files is_empty &= _is_empty if is_empty: db_dir.makeobsolete() return files, new_files, is_empty def to_podir_path(path): path = relative_real_path(path) return os.path.join(settings.POOTLE_TRANSLATION_DIRECTORY, path) def find_lang_postfix(filename): """Finds the language code at the end of a filename.""" name = os.path.splitext(os.path.basename(filename))[0] if LANGCODE_RE.match(name): return name match = LANGCODE_POSTFIX_RE.match(name) if match: return match.groups()[0] for code in Language.objects.values_list('code', flat=True): if (name.endswith('-'+code) or name.endswith('_'+code) or name.endswith('.'+code) or name.lower().endswith('-'+code.lower()) or name.lower().endswith('_'+code.lower()) or name.lower().endswith('.'+code.lower())): return code def translation_project_should_exist(language, project): """Tests if there are translation files corresponding to the given :param:`language` and :param:`project`. """ if project.get_treestyle() == "gnu": # GNU style projects are tricky if language.code == 'templates': # Language is templates; look for template files for dirpath, dirnames, filenames in os.walk(project.get_real_path()): for filename in filenames: if project.file_belongs_to_project(filename, match_templates=True) and \ match_template_filename(project, filename): return True else: # find files with the language name in the project dir for dirpath, dirnames, filenames in os.walk(project.get_real_path()): for filename in filenames: #FIXME: don't reuse already used file if project.file_belongs_to_project(filename, match_templates=False) and \ direct_language_match_filename(language.code, filename): return True else: # find directory with the language name in the project dir try: dirpath, dirnames, filename = os.walk(project.get_real_path()).next() if language.code in dirnames: return True except StopIteration: pass return False def init_store_from_template(translation_project, template_store): """Initialize a new file for `translation_project` using `template_store`. """ if translation_project.file_style == 'gnu': target_pootle_path, target_path = get_translated_name_gnu(translation_project, template_store) else: target_pootle_path, target_path = get_translated_name(translation_project, template_store) # Create the missing directories for the new TP. target_dir = os.path.dirname(target_path) if not os.path.exists(target_dir): os.makedirs(target_dir) output_file = template_store.file.store output_file.settargetlanguage(translation_project.language.code) output_file.savefile(target_path) def get_translated_name_gnu(translation_project, store): """Given a template :param:`store` and a :param:`translation_project` return target filename.
""" pootle_path_parts = store.pootle_path.split('/') pootle_path_parts[1] = translation_project.language.code pootle_path = '/'.join(pootle_path_parts[:-1]) if not pootle_path.endswith('/'): pootle_path = pootle_path + '/' suffix = translation_project.language.code + os.extsep + \ translation_project.project.localfiletype # try loading file first try: target_store = translation_project.stores.live().get( parent__pootle_path=pootle_path, name__iexact=suffix, ) return (target_store.pootle_path, target_store.file and target_store.file.path) except Store.DoesNotExist: target_store = None # is this GNU-style with prefix? use_prefix = (store.parent.child_stores.live().exclude(file="").count() > 1 or translation_project.stores.live().exclude(name__iexact=suffix, file='').count()) if not use_prefix: # let's make sure for tp in translation_project.project.translationproject_set.exclude(language__code='templates').iterator(): temp_suffix = tp.language.code + os.extsep + translation_project.project.localfiletype if tp.stores.live().exclude(name__iexact=temp_suffix).exclude(file="").count(): use_prefix = True break if use_prefix: if store.translation_project.language.code == 'templates': tprefix = os.path.splitext(store.name)[0] #FIXME: we should detect separator prefix = tprefix + '-' else: prefix = os.path.splitext(store.name)[0][:-len(store.translation_project.language.code)] tprefix = prefix[:-1] try: target_store = translation_project.stores.live().filter( parent__pootle_path=pootle_path, name__in=[ tprefix + '-' + suffix, tprefix + '_' + suffix, tprefix + '.' + suffix, tprefix + '-' + suffix.lower(), tprefix + '_' + suffix.lower(), tprefix + '.' + suffix.lower(), ], )[0] return (target_store.pootle_path, target_store.file and target_store.file.path) except (Store.DoesNotExist, IndexError): pass else: prefix = "" if store.file: path_parts = store.file.path.split(os.sep) name = prefix + suffix path_parts[-1] = name pootle_path_parts[-1] = name else: path_parts = store.parent.get_real_path().split(os.sep) path_parts.append(store.name) return '/'.join(pootle_path_parts), os.sep.join(path_parts) def get_translated_name(translation_project, store): name, ext = os.path.splitext(store.name) if store.file: path_parts = store.file.name.split(os.sep) else: path_parts = store.parent.get_real_path().split(os.sep) path_parts.append(store.name) pootle_path_parts = store.pootle_path.split('/') # Replace language code path_parts[1] = translation_project.language.code pootle_path_parts[1] = translation_project.language.code # Replace extension path_parts[-1] = name + '.' + translation_project.project.localfiletype pootle_path_parts[-1] = name + '.' + \ translation_project.project.localfiletype return ('/'.join(pootle_path_parts), absolute_real_path(os.sep.join(path_parts))) def does_not_exist(path): if os.path.exists(path): return False try: os.stat(path) # what the hell? except OSError as e: if e.errno == errno.ENOENT: # explicit no such file or directory return True
gpl-3.0
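# Editor's note: illustrative behaviour sketch, not part of the original
# file. Expected results of the filename helpers above; the filenames are
# hypothetical, and the Language-table fallback additionally needs a
# configured Django/Pootle database.
#
#   find_lang_postfix('project-fr.po')      -> 'fr'     (postfix regex)
#   find_lang_postfix('messages.pt_BR.po')  -> 'pt_BR'
#   direct_language_match_filename('fr', 'po/fr.po')         -> True
#   direct_language_match_filename('fr', 'po/project-fr.po') -> True (postfix,
#       after checking that 'project-fr' is not itself a language code)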
NMGRL/pychron
pychron/mv/diode_locator.py
2
1121
# =============================================================================== # Copyright 2012 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from pychron.mv.locator import Locator class DiodeLocator(Locator): pass # ============= EOF =============================================
apache-2.0
StudTeam6/competition
sw/ground_segment/python/udp_link/datalink_to_w5100.py
89
1441
#!/usr/bin/python import os import sys import socket import struct from optparse import OptionParser sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/lib/python") parser = OptionParser() parser.add_option("-d", "--destip", dest="dest_addr", help="Destination IP for messages picked up from local socket", default="192.168.25.47") parser.add_option("-p", "--destport", dest="dest_port", default=1234, help="Destination UDP port to send messages to") parser.add_option("-l", "--localport", dest="local_port", default=4243, help="Local port to listen to for UDP messages") (options, args) = parser.parse_args() msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) msock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) msock.bind(("", int(options.local_port))) # mreq = struct.pack("4sl", socket.inet_aton(telemip), socket.INADDR_ANY) # msock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) # initialize a socket, think of it as a cable # SOCK_DGRAM specifies that this is UDP destsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) while( 1 ): data = None try: data, addr = msock.recvfrom(1024) format = 'B' * (len(data)) strdata = struct.unpack( format, data ) print len( strdata ), ":", strdata # send the command destsock.sendto( data, (options.dest_addr, options.dest_port) ) except socket.error, e: print 'Exception', e
gpl-2.0
faux123/private_msm8660_ics
scripts/rt-tester/rt-tester.py
11005
5307
#!/usr/bin/python # # rt-mutex tester # # (C) 2006 Thomas Gleixner <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # import os import sys import getopt import shutil import string # Globals quiet = 0 test = 0 comments = 0 sysfsprefix = "/sys/devices/system/rttest/rttest" statusfile = "/status" commandfile = "/command" # Command opcodes cmd_opcodes = { "schedother" : "1", "schedfifo" : "2", "lock" : "3", "locknowait" : "4", "lockint" : "5", "lockintnowait" : "6", "lockcont" : "7", "unlock" : "8", "signal" : "11", "resetevent" : "98", "reset" : "99", } test_opcodes = { "prioeq" : ["P" , "eq" , None], "priolt" : ["P" , "lt" , None], "priogt" : ["P" , "gt" , None], "nprioeq" : ["N" , "eq" , None], "npriolt" : ["N" , "lt" , None], "npriogt" : ["N" , "gt" , None], "unlocked" : ["M" , "eq" , 0], "trylock" : ["M" , "eq" , 1], "blocked" : ["M" , "eq" , 2], "blockedwake" : ["M" , "eq" , 3], "locked" : ["M" , "eq" , 4], "opcodeeq" : ["O" , "eq" , None], "opcodelt" : ["O" , "lt" , None], "opcodegt" : ["O" , "gt" , None], "eventeq" : ["E" , "eq" , None], "eventlt" : ["E" , "lt" , None], "eventgt" : ["E" , "gt" , None], } # Print usage information def usage(): print "rt-tester.py <-c -h -q -t> <testfile>" print " -c display comments after first command" print " -h help" print " -q quiet mode" print " -t test mode (syntax check)" print " testfile: read test specification from testfile" print " otherwise from stdin" return # Print progress when not in quiet mode def progress(str): if not quiet: print str # Analyse a status value def analyse(val, top, arg): intval = int(val) if top[0] == "M": intval = intval / (10 ** int(arg)) intval = intval % 10 argval = top[2] elif top[0] == "O": argval = int(cmd_opcodes.get(arg, arg)) else: argval = int(arg) # progress("%d %s %d" %(intval, top[1], argval)) if top[1] == "eq" and intval == argval: return 1 if top[1] == "lt" and intval < argval: return 1 if top[1] == "gt" and intval > argval: return 1 return 0 # Parse the commandline try: (options, arguments) = getopt.getopt(sys.argv[1:],'chqt') except getopt.GetoptError, ex: usage() sys.exit(1) # Parse commandline options for option, value in options: if option == "-c": comments = 1 elif option == "-q": quiet = 1 elif option == "-t": test = 1 elif option == '-h': usage() sys.exit(0) # Select the input source if arguments: try: fd = open(arguments[0]) except Exception,ex: sys.stderr.write("File not found %s\n" %(arguments[0])) sys.exit(1) else: fd = sys.stdin linenr = 0 # Read the test patterns while 1: linenr = linenr + 1 line = fd.readline() if not len(line): break line = line.strip() parts = line.split(":") if not parts or len(parts) < 1: continue if len(parts[0]) == 0: continue if parts[0].startswith("#"): if comments > 1: progress(line) continue if comments == 1: comments = 2 progress(line) cmd = parts[0].strip().lower() opc = parts[1].strip().lower() tid = parts[2].strip() dat = parts[3].strip() try: # Test or wait for a status value if cmd == "t" or cmd == "w": testop = test_opcodes[opc] fname = "%s%s%s" %(sysfsprefix, tid, statusfile) if test: print fname continue while 1: query = 1 fsta = open(fname, 'r') status = fsta.readline().strip() fsta.close() stat = status.split(",") for s in stat: s = s.strip() if s.startswith(testop[0]): # Separate status value val = s[2:].strip() query = analyse(val, testop, dat) break if query or cmd == "t": break progress(" " + 
status) if not query: sys.stderr.write("Test failed in line %d\n" %(linenr)) sys.exit(1) # Issue a command to the tester elif cmd == "c": cmdnr = cmd_opcodes[opc] # Build command string and sys filename cmdstr = "%s:%s" %(cmdnr, dat) fname = "%s%s%s" %(sysfsprefix, tid, commandfile) if test: print fname continue fcmd = open(fname, 'w') fcmd.write(cmdstr) fcmd.close() except Exception,ex: sys.stderr.write(str(ex)) sys.stderr.write("\nSyntax error in line %d\n" %(linenr)) if not test: fd.close() sys.exit(1) # Normal exit pass print "Pass" sys.exit(0)
gpl-2.0
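# Editor's note: illustrative sketch, not part of the original file. A
# minimal test pattern in the format parsed above; each line of the pattern
# file is command:opcode:threadid:data, and the scenario is hypothetical.
#
#   C: locknowait: 0: 0
#   W: locked:     0: 0
#   C: unlock:     0: 0
#   T: unlocked:   0: 0
#
# Line 1 issues "locknowait" on lock 0 from thread 0, line 2 waits until
# thread 0 reports state "locked", line 3 unlocks, and line 4 tests that
# lock 0 is reported "unlocked" again.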
idjaw/cliff
cliff/formatters/base.py
3
1507
"""Base classes for formatters. """ import abc import six @six.add_metaclass(abc.ABCMeta) class Formatter(object): @abc.abstractmethod def add_argument_group(self, parser): """Add any options to the argument parser. Should use our own argument group. """ @six.add_metaclass(abc.ABCMeta) class ListFormatter(Formatter): """Base class for formatters that know how to deal with multiple objects. """ @abc.abstractmethod def emit_list(self, column_names, data, stdout, parsed_args): """Format and print the list from the iterable data source. :param column_names: names of the columns :param data: iterable data source, one tuple per object with values in order of column names :param stdout: output stream where data should be written :param parsed_args: argparse namespace from our local options """ @six.add_metaclass(abc.ABCMeta) class SingleFormatter(Formatter): """Base class for formatters that work with single objects. """ @abc.abstractmethod def emit_one(self, column_names, data, stdout, parsed_args): """Format and print the values associated with the single object. :param column_names: names of the columns :param data: iterable data source with values in order of column names :param stdout: output stream where data should be written :param parsed_args: argparse namespace from our local options """
apache-2.0
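# Editor's note: illustrative sketch, not part of cliff itself. A minimal
# ListFormatter implementation and a direct call to it.
import sys

class SimpleCSVFormatter(ListFormatter):
    def add_argument_group(self, parser):
        pass  # this formatter adds no command-line options of its own

    def emit_list(self, column_names, data, stdout, parsed_args):
        # header row, then one comma-separated line per object
        stdout.write(','.join(column_names) + '\n')
        for row in data:
            stdout.write(','.join(str(v) for v in row) + '\n')

SimpleCSVFormatter().emit_list(('name', 'size'), [('a.txt', 12)],
                               sys.stdout, None)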
coll-gate/collgate
server/main/language.py
1
4780
# -*- coding: utf-8; -*- # # @file language.py # @brief Views related to the language type. # @author Frédéric SCHERMA (INRA UMR1095) # @date 2016-09-01 # @copyright Copyright (c) 2016 INRA/CIRAD # @license MIT (see LICENSE file) # @details from django.core.exceptions import SuspiciousOperation from django.shortcuts import get_object_or_404 from django.utils import translation from django.utils.translation import ugettext_lazy as _ from igdectk.rest.handler import * from igdectk.rest.response import HttpResponseRest from main.cache import cache_manager from messenger.cache import client_cache_manager from .models import InterfaceLanguages, Language from .main import RestMain class RestLanguage(RestMain): regex = r'^language/$' suffix = 'language' class RestLanguageCode(RestLanguage): regex = r'^(?P<code>[a-zA-Z]{2}([_-][a-zA-Z]{2})*)/$' suffix = 'code' class RestLanguageCodeLabel(RestLanguageCode): regex = r'^label/$' suffix = 'label' class RestUI(RestMain): regex = r'^ui/$' suffix = 'ui' class RestUILanguage(RestUI): regex = r'^language/$' suffix = 'language' @RestLanguage.def_request(Method.GET, Format.JSON) def get_languages(request): """ Get the list of languages for the entities in JSON """ lang = translation.get_language() cache_name = 'languages:%s' % lang languages = cache_manager.get('main', cache_name) if languages: return HttpResponseRest(request, languages) languages = [] for language in Language.objects.all().order_by('code'): languages.append({ 'id': language.code, 'value': language.code, 'label': language.get_label() }) # cache for 24h cache_manager.set('main', cache_name, languages, 60*60*24) return HttpResponseRest(request, languages) @RestLanguage.def_admin_request(Method.POST, Format.JSON, content={ "type": "object", "properties": { "code": Language.CODE_VALIDATOR, "label": Language.LABEL_VALIDATOR }, }, staff=True ) def post_language(request): """ Create a new language for data. """ code = request.data['code'] label = request.data['label'] lang = translation.get_language() language = Language() language.code = code language.set_label(lang, label) language.save() results = { 'id': code, 'value': code, 'label': label } return HttpResponseRest(request, results) @RestLanguageCode.def_admin_request(Method.DELETE, Format.JSON, staff=True) def delete_language(request, code): language = get_object_or_404(Language, code=code) # do we allow delete, given data consistency? # it is not really a problem because the code is a standard language.delete() return HttpResponseRest(request, {}) @RestLanguageCodeLabel.def_auth_request(Method.GET, Format.JSON) def get_all_labels_of_language(request, code): """ Returns labels for each language related to the user interface.
""" language = get_object_or_404(Language, code=code) label_dict = language.label # complete with missing languages for lang, lang_label in InterfaceLanguages.choices(): if lang not in label_dict: label_dict[lang] = "" results = label_dict return HttpResponseRest(request, results) @RestLanguageCodeLabel.def_admin_request(Method.PUT, Format.JSON, content={ "type": "object", "additionalProperties": Language.LABEL_VALIDATOR }, staff=True) def change_language_labels(request, code): language = get_object_or_404(Language, code=code) labels = request.data languages_values = [lang[0] for lang in InterfaceLanguages.choices()] for lang, label in labels.items(): if lang not in languages_values: raise SuspiciousOperation(_("Unsupported language identifier")) language.label = labels language.save() result = { 'label': language.get_label() } return HttpResponseRest(request, result) @RestUILanguage.def_request(Method.GET, Format.JSON) def get_ui_languages(request): """ Get the list of languages for the UI in JSON """ lang = translation.get_language() cache_name = 'ui-languages:%s' % lang results = cache_manager.get('main', cache_name) if results: return results languages = [] for language in InterfaceLanguages: languages.append({ 'id': language.value, 'value': language.value, 'label': str(language.label) }) # cache for 24h cache_manager.set('main', cache_name, results, 60*60*24) return HttpResponseRest(request, languages)
mit
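# Editor's note: illustrative sketch, not part of the original file. The JSON
# exchanged with the language endpoints defined above; the URL prefix and the
# staff credentials are assumptions.
#
#   POST   /main/language/      body: {"code": "fr", "label": "French"}
#          -> {"id": "fr", "value": "fr", "label": "French"}
#   GET    /main/language/      -> cached list of {id, value, label} items
#   DELETE /main/language/fr/   -> {}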
PacktPublishing/Mastering-Mesos
Chapter4/Aurora/src/test/python/apache/aurora/common/test_cluster_option.py
14
2562
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from optparse import OptionParser import pytest from apache.aurora.common.cluster import Cluster from apache.aurora.common.cluster_option import ClusterOption from apache.aurora.common.clusters import Clusters CLUSTER_LIST = Clusters(( Cluster(name='smf1'), Cluster(name='smf1-test'), )) def cluster_provider(name): return CLUSTER_LIST[name] def test_constructors(): ClusterOption('--test', '-t', help="Test cluster.", clusters=CLUSTER_LIST) ClusterOption('--test', '-t', help="Test cluster.", cluster_provider=cluster_provider) with pytest.raises(ValueError): ClusterOption() with pytest.raises(ValueError): ClusterOption('--cluster') # requires clusters= with pytest.raises(ValueError): ClusterOption('--cluster', clusters=CLUSTER_LIST, cluster_provider=cluster_provider) class MockOptionParser(OptionParser): class Error(Exception): pass def error(self, msg): # per optparse documentation: # Print a usage message incorporating 'msg' to stderr and exit. # If you override this in a subclass, it should not return -- it # should either exit or raise an exception. raise self.Error(msg) def make_parser(): parser = MockOptionParser() parser.add_option(ClusterOption('--source_cluster', '-s', clusters=CLUSTER_LIST)) parser.add_option(ClusterOption('--dest_cluster', clusters=CLUSTER_LIST)) parser.add_option(ClusterOption('--cluster', cluster_provider=cluster_provider)) return parser def test_parsable(): parser = make_parser() values, _ = parser.parse_args(['--source_cluster=smf1-test', '--cluster=smf1-test']) assert isinstance(values.source_cluster, Cluster) assert isinstance(values.cluster, Cluster) def test_not_parsable(): parser = make_parser() try: parser.parse_args(['--source_cluster=borg']) except MockOptionParser.Error as e: assert 'borg is not a valid cluster for the --source_cluster option.' in e.args[0] else: assert False, 'Expected OptionParser to raise on invalid cluster list.'
mit
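# Editor's note: illustrative sketch, not part of the original file.
# Standalone use of ClusterOption outside the test helpers above; the
# cluster names come from the same hypothetical CLUSTER_LIST.
parser = OptionParser()
parser.add_option(ClusterOption('--cluster', clusters=CLUSTER_LIST))
values, _ = parser.parse_args(['--cluster=smf1'])
assert isinstance(values.cluster, Cluster)   # resolved to a Cluster object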
petewarden/tensorflow
tensorflow/python/data/experimental/ops/prefetching_ops.py
17
11416
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python wrapper for prefetching_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.util import structure from tensorflow.python.eager import function from tensorflow.python.framework import device as framework_device from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.util.tf_export import tf_export @tf_export("data.experimental.prefetch_to_device") def prefetch_to_device(device, buffer_size=None): """A transformation that prefetches dataset values to the given `device`. NOTE: Although the transformation creates a `tf.data.Dataset`, the transformation must be the final `Dataset` in the input pipeline. Args: device: A string. The name of a device to which elements will be prefetched. buffer_size: (Optional.) The number of elements to buffer on `device`. Defaults to an automatically chosen value. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): return dataset.apply( copy_to_device(target_device=device)).prefetch(buffer_size) return _apply_fn @tf_export("data.experimental.copy_to_device") def copy_to_device(target_device, source_device="/cpu:0"): """A transformation that copies dataset elements to the given `target_device`. Args: target_device: The name of a device to which elements will be copied. source_device: The original device on which `input_dataset` will be placed. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.autotune = False return _CopyToDeviceDataset( dataset, target_device=target_device, source_device=source_device).with_options(options) return _apply_fn # TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate # all inputs to the Op are in host memory, thereby avoiding some unnecessary # Sends and Recvs. class _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset): """A `Dataset` that copies elements to another device.""" def __init__(self, input_dataset, target_device, source_device="/cpu:0"): """Constructs a _CopyToDeviceDataset. 
Args: input_dataset: `Dataset` to be copied target_device: The name of the device to which elements would be copied. source_device: Device where input_dataset would be placed. """ self._input_dataset = input_dataset self._target_device = target_device spec = framework_device.DeviceSpec().from_string(self._target_device) self._is_gpu_target = (spec.device_type == "GPU") self._source_device_string = source_device self._source_device = ops.convert_to_tensor(source_device) wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant( self._input_dataset._variant_tensor) # pylint: disable=protected-access @function.defun() def _init_func(): """Creates an iterator for the input dataset. Returns: A `string` tensor that encapsulates the iterator created. """ ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant) resource = gen_dataset_ops.anonymous_iterator( **self._input_dataset._flat_structure) # pylint: disable=protected-access with ops.control_dependencies( [gen_dataset_ops.make_iterator(ds_variant, resource)]): return gen_dataset_ops.iterator_to_string_handle(resource) init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access @function.defun() def _remote_init_func(): return functional_ops.remote_call( target=self._source_device, args=init_func_concrete.captured_inputs, Tout=[dtypes.string], f=init_func_concrete) self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access self._init_captured_args = self._init_func.captured_inputs @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)]) def _next_func(string_handle): """Calls get_next for created iterator. Args: string_handle: An iterator string handle created by _init_func Returns: The elements generated from `input_dataset` """ with ops.device(self._source_device_string): iterator = iterator_ops.Iterator.from_string_handle( string_handle, dataset_ops.get_legacy_output_types(self), dataset_ops.get_legacy_output_shapes(self), dataset_ops.get_legacy_output_classes(self)) return structure.to_tensor_list(self.element_spec, iterator.get_next()) next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access @function.defun_with_attributes( input_signature=[tensor_spec.TensorSpec([], dtypes.string)], attributes={"experimental_ints_on_device": True}) def _remote_next_func(string_handle): return functional_ops.remote_call( target=self._source_device, args=[string_handle] + next_func_concrete.captured_inputs, Tout=self._input_dataset._flat_types, # pylint: disable=protected-access f=next_func_concrete) self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access self._next_captured_args = self._next_func.captured_inputs @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)]) def _finalize_func(string_handle): """Destroys the iterator resource created. 
      Args:
        string_handle: An iterator string handle created by _init_func

      Returns:
        Tensor constant 0
      """
      iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
          string_handle,
          **self._input_dataset._flat_structure)  # pylint: disable=protected-access
      with ops.control_dependencies([
          resource_variable_ops.destroy_resource_op(
              iterator_resource, ignore_lookup_error=True)]):
        return array_ops.constant(0, dtypes.int64)

    finalize_func_concrete = _finalize_func._get_concrete_function_internal()  # pylint: disable=protected-access

    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=self._source_device,
          args=[string_handle] + finalize_func_concrete.captured_inputs,
          Tout=[dtypes.int64],
          f=finalize_func_concrete)

    self._finalize_func = _remote_finalize_func._get_concrete_function_internal(  # pylint: disable=protected-access
    )
    self._finalize_captured_args = self._finalize_func.captured_inputs

    g = ops.get_default_graph()
    self._init_func.add_to_graph(g)
    self._next_func.add_to_graph(g)
    self._finalize_func.add_to_graph(g)
    # pylint: enable=protected-access

    with ops.device(self._target_device):
      variant_tensor = gen_dataset_ops.generator_dataset(
          self._init_captured_args,
          self._next_captured_args,
          self._finalize_captured_args,
          init_func=self._init_func,
          next_func=self._next_func,
          finalize_func=self._finalize_func,
          **self._input_dataset._flat_structure)  # pylint: disable=protected-access
    super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)

  # The one_shot_iterator implementation needs a 0 arg _make_dataset function
  # that thereby captures all the inputs required to create the dataset. Since
  # there are strings that are inputs to the GeneratorDataset which can't be
  # placed on a GPU, this fails for the GPU case. Therefore, disabling it for
  # GPU.
  def make_one_shot_iterator(self):
    if self._is_gpu_target:
      raise ValueError("Cannot create a one shot iterator when using "
                       "`tf.data.experimental.copy_to_device()` on GPU. Please "
                       "use `Dataset.make_initializable_iterator()` instead.")
    else:
      return super(_CopyToDeviceDataset, self).make_one_shot_iterator()


class _MapOnGpuDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that maps a function over elements in its input using a GPU."""

  def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism

    self._map_func = dataset_ops.StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        defun_kwargs={"experimental_ints_on_device": True})
    variant_tensor = ged_ops.experimental_map_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        **self._flat_structure)
    super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._map_func.output_structure

  def _transformation_name(self):
    return "map_on_gpu()"


def map_on_gpu(map_func):
  """Maps `map_func` across the elements of this dataset.

  NOTE: This is a highly experimental version of `tf.data.Dataset.map` that
  runs `map_func` on GPU. It must be used after applying the
  `tf.data.experimental.copy_to_device` transformation with a GPU device
  argument.
Args: map_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to another nested structure of tensors. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): return _MapOnGpuDataset(dataset, map_func) return _apply_fn
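# ---------------------------------------------------------------------------
# Usage sketch: how the transformations above are meant to compose. The toy
# dataset and the "/gpu:0" device name are illustrative assumptions, not part
# of the API; the snippet is kept in comments so importing this module stays
# free of side effects.
#
#   # `prefetch_to_device` must be the final transformation in the pipeline:
#   ds = tf.data.Dataset.range(10)
#   ds = ds.apply(tf.data.experimental.prefetch_to_device("/gpu:0",
#                                                         buffer_size=2))
#
#   # `map_on_gpu` (module-level, unexported) may only follow a
#   # `copy_to_device` with a GPU device argument:
#   ds2 = tf.data.Dataset.range(10)
#   ds2 = ds2.apply(tf.data.experimental.copy_to_device("/gpu:0"))
#   ds2 = ds2.apply(map_on_gpu(lambda x: x * 2))
# ---------------------------------------------------------------------------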
apache-2.0
ctismer/pyfilesystem
fs/contrib/tahoelafs/__init__.py
14
15472
'''
fs.contrib.tahoelafs
====================

This module provides a PyFilesystem interface to the Tahoe Least Authority
File System.  Tahoe-LAFS is a distributed, encrypted, fault-tolerant storage
system:

    http://tahoe-lafs.org/

You will need access to a Tahoe-LAFS "web api" service.

Example (it will use the publicly available, but slow, Tahoe-LAFS cloud)::

    from fs.contrib.tahoelafs import TahoeLAFS, Connection
    dircap = TahoeLAFS.createdircap(webapi='http://insecure.tahoe-lafs.org')
    print "Your dircap (unique key to your storage directory) is", dircap
    print "Keep it safe!"
    fs = TahoeLAFS(dircap, autorun=False, webapi='http://insecure.tahoe-lafs.org')
    f = fs.open("foo.txt", "a")
    f.write('bar!')
    f.close()
    print "Now visit %s and enjoy :-)" % fs.getpathurl('foo.txt')

If any problem occurs, you can turn on internal debugging messages::

    import logging
    l = logging.getLogger()
    l.setLevel(logging.DEBUG)
    l.addHandler(logging.StreamHandler(sys.stdout))

    ... your Python code using TahoeLAFS ...

TODO:
   * unicode support
   * try network errors / bad happiness
   * exceptions
   * tests
   * sanitize all path types (., /)
   * support for extra large file uploads (poster module)
   * Possibility to block write until upload done (Tahoe mailing list)
   * Report something sane when Tahoe crashed/unavailable
   * solve failed unit tests (makedir_winner, ...)
   * file times
   * docs & author
   * python3 support
   * remove creating blank files (depends on FileUploadManager)

TODO (Not TahoeLAFS specific tasks):
   * RemoteFileBuffer on the fly buffering support
   * RemoteFileBuffer unit tests
   * RemoteFileBuffer submit to trunk
   * Implement FileUploadManager + faking isfile/exists of just processing file
   * pyfilesystem docs are outdated (rename, movedir, ...)

'''

import stat as statinfo

import logging
from logging import DEBUG, INFO, ERROR, CRITICAL

import fs
import fs.errors as errors
from fs.path import abspath, relpath, normpath, dirname, pathjoin
from fs.base import FS, NullFile
from fs import _thread_synchronize_default, SEEK_END
from fs.remote import CacheFSMixin, RemoteFileBuffer
from fs.base import fnmatch, NoDefaultMeta

from util import TahoeUtil
from connection import Connection

from six import b

logger = fs.getLogger('fs.tahoelafs')


def _fix_path(func):
    """Method decorator for automatically normalising paths."""
    def wrapper(self, *args, **kwds):
        if len(args):
            args = list(args)
            args[0] = _fixpath(args[0])
        return func(self, *args, **kwds)
    return wrapper


def _fixpath(path):
    """Normalize the given path."""
    return abspath(normpath(path))


class _TahoeLAFS(FS):
    """FS providing raw access to a Tahoe-LAFS Filesystem.

    This class implements all the details of interacting with a Tahoe-backed
    filesystem, but you probably don't want to use it in practice.  Use the
    TahoeLAFS class instead, which has some internal caching to improve
    performance.
    """

    _meta = {'virtual': False,
             'read_only': False,
             'unicode_paths': True,
             'case_insensitive_paths': False,
             'network': True}

    def __init__(self, dircap, largefilesize=10*1024*1024,
                 webapi='http://127.0.0.1:3456'):
        '''Creates instance of TahoeLAFS.

        :param dircap: special hash allowing user to work with
            TahoeLAFS directory.
        :param largefilesize: Create a placeholder file for files larger
            than this threshold.  Uploading and processing of large files
            can last extremely long (many hours), so this placeholder can
            help you remember that an upload is in progress.  Setting this
            to None will skip creating placeholder files for any uploads.
        '''
        self.dircap = dircap if not dircap.endswith('/') else dircap[:-1]
        self.largefilesize = largefilesize
        self.connection = Connection(webapi)
        self.tahoeutil = TahoeUtil(webapi)
        super(_TahoeLAFS, self).__init__(thread_synchronize=_thread_synchronize_default)

    def __str__(self):
        return "<TahoeLAFS: %s>" % self.dircap

    @classmethod
    def createdircap(cls, webapi='http://127.0.0.1:3456'):
        return TahoeUtil(webapi).createdircap()

    def getmeta(self, meta_name, default=NoDefaultMeta):
        if meta_name == "read_only":
            return self.dircap.startswith('URI:DIR2-RO')
        return super(_TahoeLAFS, self).getmeta(meta_name, default)

    @_fix_path
    def open(self, path, mode='r', **kwargs):
        self._log(INFO, 'Opening file %s in mode %s' % (path, mode))
        newfile = False
        if not self.exists(path):
            if 'w' in mode or 'a' in mode:
                newfile = True
            else:
                self._log(DEBUG, "File %s not found while opening for reads" % path)
                raise errors.ResourceNotFoundError(path)
        elif self.isdir(path):
            self._log(DEBUG, "Path %s is directory, not a file" % path)
            raise errors.ResourceInvalidError(path)
        elif 'w' in mode:
            newfile = True

        if newfile:
            self._log(DEBUG, 'Creating empty file %s' % path)
            if self.getmeta("read_only"):
                raise errors.UnsupportedError('read only filesystem')
            self.setcontents(path, b(''))
            handler = NullFile()
        else:
            self._log(DEBUG, 'Opening existing file %s for reading' % path)
            handler = self.getrange(path, 0)

        return RemoteFileBuffer(self, path, mode, handler,
                                write_on_flush=False)

    @_fix_path
    def desc(self, path):
        try:
            return self.getinfo(path)
        except Exception:
            return ''

    @_fix_path
    def exists(self, path):
        try:
            self.getinfo(path)
            self._log(DEBUG, "Path %s exists" % path)
            return True
        except errors.ResourceNotFoundError:
            self._log(DEBUG, "Path %s does not exist" % path)
            return False
        except errors.ResourceInvalidError:
            self._log(DEBUG, "Path %s does not exist, probably misspelled URI" % path)
            return False

    @_fix_path
    def getsize(self, path):
        try:
            size = self.getinfo(path)['size']
            self._log(DEBUG, "Size of %s is %d" % (path, size))
            return size
        except errors.ResourceNotFoundError:
            return 0

    @_fix_path
    def isfile(self, path):
        try:
            isfile = (self.getinfo(path)['type'] == 'filenode')
        except errors.ResourceNotFoundError:
            #isfile = not path.endswith('/')
            isfile = False
        self._log(DEBUG, "Path %s is file: %d" % (path, isfile))
        return isfile

    @_fix_path
    def isdir(self, path):
        try:
            isdir = (self.getinfo(path)['type'] == 'dirnode')
        except errors.ResourceNotFoundError:
            isdir = False
        self._log(DEBUG, "Path %s is directory: %d" % (path, isdir))
        return isdir

    def listdir(self, *args, **kwargs):
        return [item[0] for item in self.listdirinfo(*args, **kwargs)]

    def listdirinfo(self, *args, **kwds):
        return list(self.ilistdirinfo(*args, **kwds))

    def ilistdir(self, *args, **kwds):
        for item in self.ilistdirinfo(*args, **kwds):
            yield item[0]

    @_fix_path
    def ilistdirinfo(self, path="/", wildcard=None, full=False, absolute=False,
                     dirs_only=False, files_only=False):
        self._log(DEBUG, "Listing directory (listdirinfo) %s" % path)

        if dirs_only and files_only:
            raise ValueError("dirs_only and files_only can not both be True")

        for item in self.tahoeutil.list(self.dircap, path):
            if dirs_only and item['type'] == 'filenode':
                continue
            elif files_only and item['type'] == 'dirnode':
                continue

            if wildcard is not None:
                if isinstance(wildcard, basestring):
                    if not fnmatch.fnmatch(item['name'], wildcard):
                        continue
                else:
                    if not wildcard(item['name']):
                        continue

            if full:
                item_path = relpath(pathjoin(path, item['name']))
            elif absolute:
                item_path = abspath(pathjoin(path, item['name']))
            else:
                item_path = item['name']

            yield (item_path, item)

    @_fix_path
    def remove(self, path):
        self._log(INFO, 'Removing file %s' % path)
        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')

        if not self.isfile(path):
            if not self.isdir(path):
                raise errors.ResourceNotFoundError(path)
            raise errors.ResourceInvalidError(path)

        try:
            self.tahoeutil.unlink(self.dircap, path)
        except Exception, e:
            raise errors.ResourceInvalidError(path)

    @_fix_path
    def removedir(self, path, recursive=False, force=False):
        self._log(INFO, "Removing directory %s" % path)
        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')
        if not self.isdir(path):
            if not self.isfile(path):
                raise errors.ResourceNotFoundError(path)
            raise errors.ResourceInvalidError(path)
        if not force and self.listdir(path):
            raise errors.DirectoryNotEmptyError(path)

        self.tahoeutil.unlink(self.dircap, path)

        if recursive and path != '/':
            try:
                self.removedir(dirname(path), recursive=True)
            except errors.DirectoryNotEmptyError:
                pass

    @_fix_path
    def makedir(self, path, recursive=False, allow_recreate=False):
        self._log(INFO, "Creating directory %s" % path)
        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')
        if self.exists(path):
            if not self.isdir(path):
                raise errors.ResourceInvalidError(path)
            if not allow_recreate:
                raise errors.DestinationExistsError(path)
        if not recursive and not self.exists(dirname(path)):
            raise errors.ParentDirectoryMissingError(path)
        self.tahoeutil.mkdir(self.dircap, path)

    def movedir(self, src, dst, overwrite=False):
        self.move(src, dst, overwrite=overwrite)

    def move(self, src, dst, overwrite=False):
        self._log(INFO, "Moving file from %s to %s" % (src, dst))
        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')
        src = _fixpath(src)
        dst = _fixpath(dst)
        if not self.exists(dirname(dst)):
            raise errors.ParentDirectoryMissingError(dst)
        if not overwrite and self.exists(dst):
            raise errors.DestinationExistsError(dst)
        self.tahoeutil.move(self.dircap, src, dst)

    def rename(self, src, dst):
        self.move(src, dst)

    def copy(self, src, dst, overwrite=False, chunk_size=16384):
        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')

        # FIXME: this is out of date; how to do native tahoe copy?
        # FIXME: Workaround because isfile() does not exist on _TahoeLAFS
        FS.copy(self, src, dst, overwrite, chunk_size)

    def copydir(self, src, dst, overwrite=False, ignore_errors=False,
                chunk_size=16384):
        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')

        # FIXME: this is out of date; how to do native tahoe copy?
        # FIXME: Workaround because isfile() does not exist on _TahoeLAFS
        FS.copydir(self, src, dst, overwrite, ignore_errors, chunk_size)

    def _log(self, level, message):
        if not logger.isEnabledFor(level):
            return
        logger.log(level, u'(%d) %s' % (id(self),
                                        unicode(message).encode('ASCII', 'replace')))

    @_fix_path
    def getpathurl(self, path, allow_none=False, webapi=None):
        '''
            Retrieve URL where the file/directory is stored
        '''
        if webapi is None:
            webapi = self.connection.webapi
        self._log(DEBUG, "Retrieving URL for %s over %s" % (path, webapi))
        path = self.tahoeutil.fixwinpath(path, False)
        return u"%s/uri/%s%s" % (webapi, self.dircap, path)

    @_fix_path
    def getrange(self, path, offset, length=None):
        return self.connection.get(u'/uri/%s%s' % (self.dircap, path),
                                   offset=offset, length=length)

    @_fix_path
    def setcontents(self, path, file, chunk_size=64*1024):
        self._log(INFO, 'Uploading file %s' % path)
        size = None

        if self.getmeta("read_only"):
            raise errors.UnsupportedError('read only filesystem')

        # Workaround for large files:
        # First create zero file placeholder, then
        # upload final content.
        if self.largefilesize is not None and getattr(file, 'read', None):
            # As 'file' can be also a string, need to check,
            # if 'file' looks like duck. Sorry, file.
            file.seek(0, SEEK_END)
            size = file.tell()
            file.seek(0)

            if size > self.largefilesize:
                self.connection.put(u'/uri/%s%s' % (self.dircap, path),
                                    "PyFilesystem.TahoeLAFS: Upload started, final size %d" % size)

        self.connection.put(u'/uri/%s%s' % (self.dircap, path), file, size=size)

    @_fix_path
    def getinfo(self, path):
        self._log(INFO, 'Reading meta for %s' % path)
        info = self.tahoeutil.info(self.dircap, path)
        #import datetime
        #info['created_time'] = datetime.datetime.now()
        #info['modified_time'] = datetime.datetime.now()
        #info['accessed_time'] = datetime.datetime.now()

        if info['type'] == 'filenode':
            info["st_mode"] = 0700 | statinfo.S_IFREG
        elif info['type'] == 'dirnode':
            info["st_mode"] = 0700 | statinfo.S_IFDIR
        return info


class TahoeLAFS(CacheFSMixin, _TahoeLAFS):
    """FS providing cached access to a Tahoe Filesystem.

    This class is the preferred means to access a Tahoe filesystem.  It
    maintains an internal cache of recently-accessed metadata to speed up
    operations.
    """

    def __init__(self, *args, **kwds):
        kwds.setdefault("cache_timeout", 60)
        super(TahoeLAFS, self).__init__(*args, **kwds)
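# ---------------------------------------------------------------------------
# Usage sketch: a minimal round trip through the cached wrapper above. It
# assumes a local Tahoe-LAFS "web api" service on the default port; the
# directory and file names are illustrative only, kept in comments so the
# module has no import-time side effects.
#
#   dircap = TahoeLAFS.createdircap()        # one-time: mint a new dircap
#   fs = TahoeLAFS(dircap)                   # cached access, 60 s timeout
#   fs.makedir('/docs')
#   fs.setcontents('/docs/hello.txt', b('hello world'))
#   print fs.listdir('/docs')                # -> ['hello.txt']
#   print fs.getpathurl('/docs/hello.txt')   # direct web-api URL
# ---------------------------------------------------------------------------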
bsd-3-clause
kumarkrishna/sympy
sympy/printing/latex.py
7
71602
""" A Printer which converts an expression into its LaTeX equivalent. """ from __future__ import print_function, division from sympy.core import S, Add, Symbol from sympy.core.function import _coeff_isneg from sympy.core.sympify import SympifyError from sympy.core.alphabets import greeks from sympy.core.operations import AssocOp from sympy.logic.boolalg import true ## sympy.printing imports from .printer import Printer from .conventions import split_super_sub, requires_partial from .precedence import precedence, PRECEDENCE import mpmath.libmp as mlib from mpmath.libmp import prec_to_dps from sympy.core.compatibility import default_sort_key, range from sympy.utilities.iterables import has_variety import re # Hand-picked functions which can be used directly in both LaTeX and MathJax # Complete list at http://www.mathjax.org/docs/1.1/tex.html#supported-latex-commands # This variable only contains those functions which sympy uses. accepted_latex_functions = ['arcsin', 'arccos', 'arctan', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'sqrt', 'ln', 'log', 'sec', 'csc', 'cot', 'coth', 're', 'im', 'frac', 'root', 'arg', ] tex_greek_dictionary = { 'Alpha': 'A', 'Beta': 'B', 'Epsilon': 'E', 'Zeta': 'Z', 'Eta': 'H', 'Iota': 'I', 'Kappa': 'K', 'Mu': 'M', 'Nu': 'N', 'omicron': 'o', 'Omicron': 'O', 'Rho': 'P', 'Tau': 'T', 'Chi': 'X', 'lamda': r'\lambda', 'Lamda': r'\Lambda', 'khi': r'\chi', 'Khi': r'X', 'varepsilon': r'\varepsilon', 'varkappa': r'\varkappa', 'varphi': r'\varphi', 'varpi': r'\varpi', 'varrho': r'\varrho', 'varsigma': r'\varsigma', 'vartheta': r'\vartheta', } other_symbols = set(['aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth', 'hbar', 'hslash', 'mho', 'wp', ]) # Variable name modifiers modifier_dict = { # Accents 'mathring': lambda s: r'\mathring{'+s+r'}', 'ddddot': lambda s: r'\ddddot{'+s+r'}', 'dddot': lambda s: r'\dddot{'+s+r'}', 'ddot': lambda s: r'\ddot{'+s+r'}', 'dot': lambda s: r'\dot{'+s+r'}', 'check': lambda s: r'\check{'+s+r'}', 'breve': lambda s: r'\breve{'+s+r'}', 'acute': lambda s: r'\acute{'+s+r'}', 'grave': lambda s: r'\grave{'+s+r'}', 'tilde': lambda s: r'\tilde{'+s+r'}', 'hat': lambda s: r'\hat{'+s+r'}', 'bar': lambda s: r'\bar{'+s+r'}', 'vec': lambda s: r'\vec{'+s+r'}', 'prime': lambda s: "{"+s+"}'", 'prm': lambda s: "{"+s+"}'", # Faces 'bold': lambda s: r'\boldsymbol{'+s+r'}', 'bm': lambda s: r'\boldsymbol{'+s+r'}', 'cal': lambda s: r'\mathcal{'+s+r'}', 'scr': lambda s: r'\mathscr{'+s+r'}', 'frak': lambda s: r'\mathfrak{'+s+r'}', # Brackets 'norm': lambda s: r'\left\|{'+s+r'}\right\|', 'avg': lambda s: r'\left\langle{'+s+r'}\right\rangle', 'abs': lambda s: r'\left|{'+s+r'}\right|', 'mag': lambda s: r'\left|{'+s+r'}\right|', } greek_letters_set = frozenset(greeks) class LatexPrinter(Printer): printmethod = "_latex" _default_settings = { "order": None, "mode": "plain", "itex": False, "fold_frac_powers": False, "fold_func_brackets": False, "fold_short_frac": None, "long_frac_ratio": 2, "mul_symbol": None, "inv_trig_style": "abbreviated", "mat_str": None, "mat_delim": "[", "symbol_names": {}, } def __init__(self, settings=None): Printer.__init__(self, settings) if 'mode' in self._settings: valid_modes = ['inline', 'plain', 'equation', 'equation*'] if self._settings['mode'] not in valid_modes: raise ValueError("'mode' must be one of 'inline', 'plain', " "'equation' or 'equation*'") if self._settings['fold_short_frac'] is None and \ self._settings['mode'] == 'inline': self._settings['fold_short_frac'] = True mul_symbol_table = { None: r" ", "ldot": r" \,.\, ", "dot": r" 
\cdot ", "times": r" \times " } self._settings['mul_symbol_latex'] = \ mul_symbol_table[self._settings['mul_symbol']] self._settings['mul_symbol_latex_numbers'] = \ mul_symbol_table[self._settings['mul_symbol'] or 'dot'] self._delim_dict = {'(': ')', '[': ']'} def parenthesize(self, item, level): if precedence(item) <= level: return r"\left(%s\right)" % self._print(item) else: return self._print(item) def doprint(self, expr): tex = Printer.doprint(self, expr) if self._settings['mode'] == 'plain': return tex elif self._settings['mode'] == 'inline': return r"$%s$" % tex elif self._settings['itex']: return r"$$%s$$" % tex else: env_str = self._settings['mode'] return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str) def _needs_brackets(self, expr): """ Returns True if the expression needs to be wrapped in brackets when printed, False otherwise. For example: a + b => True; a => False; 10 => False; -10 => True. """ return not ((expr.is_Integer and expr.is_nonnegative) or (expr.is_Atom and (expr is not S.NegativeOne and expr.is_Rational is False))) def _needs_function_brackets(self, expr): """ Returns True if the expression needs to be wrapped in brackets when passed as an argument to a function, False otherwise. This is a more liberal version of _needs_brackets, in that many expressions which need to be wrapped in brackets when added/subtracted/raised to a power do not need them when passed to a function. Such an example is a*b. """ if not self._needs_brackets(expr): return False else: # Muls of the form a*b*c... can be folded if expr.is_Mul and not self._mul_is_clean(expr): return True # Pows which don't need brackets can be folded elif expr.is_Pow and not self._pow_is_clean(expr): return True # Add and Function always need brackets elif expr.is_Add or expr.is_Function: return True else: return False def _needs_mul_brackets(self, expr, first=False, last=False): """ Returns True if the expression needs to be wrapped in brackets when printed as part of a Mul, False otherwise. This is True for Add, but also for some container objects that would not need brackets when appearing last in a Mul, e.g. an Integral. ``last=True`` specifies that this expr is the last to appear in a Mul. ``first=True`` specifies that this expr is the first to appear in a Mul. """ from sympy import Integral, Piecewise, Product, Sum if expr.is_Add: return True elif expr.is_Relational: return True elif expr.is_Mul: if not first and _coeff_isneg(expr): return True if (not last and any([expr.has(x) for x in (Integral, Piecewise, Product, Sum)])): return True return False def _needs_add_brackets(self, expr): """ Returns True if the expression needs to be wrapped in brackets when printed as part of an Add, False otherwise. This is False for most things. 
""" if expr.is_Relational: return True return False def _mul_is_clean(self, expr): for arg in expr.args: if arg.is_Function: return False return True def _pow_is_clean(self, expr): return not self._needs_brackets(expr.base) def _do_exponent(self, expr, exp): if exp is not None: return r"\left(%s\right)^{%s}" % (expr, exp) else: return expr def _print_bool(self, e): return r"\mathrm{%s}" % e _print_BooleanTrue = _print_bool _print_BooleanFalse = _print_bool def _print_NoneType(self, e): return r"\mathrm{%s}" % e def _print_Add(self, expr, order=None): if self.order == 'none': terms = list(expr.args) else: terms = self._as_ordered_terms(expr, order=order) tex = "" for i, term in enumerate(terms): if i == 0: pass elif _coeff_isneg(term): tex += " - " term = -term else: tex += " + " term_tex = self._print(term) if self._needs_add_brackets(term): term_tex = r"\left(%s\right)" % term_tex tex += term_tex return tex def _print_Float(self, expr): # Based off of that in StrPrinter dps = prec_to_dps(expr._prec) str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True) # Must always have a mul symbol (as 2.5 10^{20} just looks odd) # thus we use the number separator separator = self._settings['mul_symbol_latex_numbers'] if 'e' in str_real: (mant, exp) = str_real.split('e') if exp[0] == '+': exp = exp[1:] return r"%s%s10^{%s}" % (mant, separator, exp) elif str_real == "+inf": return r"\infty" elif str_real == "-inf": return r"- \infty" else: return str_real def _print_Mul(self, expr): if _coeff_isneg(expr): expr = -expr tex = "- " else: tex = "" from sympy.simplify import fraction numer, denom = fraction(expr, exact=True) separator = self._settings['mul_symbol_latex'] numbersep = self._settings['mul_symbol_latex_numbers'] def convert(expr): if not expr.is_Mul: return str(self._print(expr)) else: _tex = last_term_tex = "" if self.order not in ('old', 'none'): args = expr.as_ordered_factors() else: args = expr.args for i, term in enumerate(args): term_tex = self._print(term) if self._needs_mul_brackets(term, first=(i == 0), last=(i == len(args) - 1)): term_tex = r"\left(%s\right)" % term_tex if re.search("[0-9][} ]*$", last_term_tex) and \ re.match("[{ ]*[-+0-9]", term_tex): # between two numbers _tex += numbersep elif _tex: _tex += separator _tex += term_tex last_term_tex = term_tex return _tex if denom is S.One: # use the original expression here, since fraction() may have # altered it when producing numer and denom tex += convert(expr) else: snumer = convert(numer) sdenom = convert(denom) ldenom = len(sdenom.split()) ratio = self._settings['long_frac_ratio'] if self._settings['fold_short_frac'] \ and ldenom <= 2 and not "^" in sdenom: # handle short fractions if self._needs_mul_brackets(numer, last=False): tex += r"\left(%s\right) / %s" % (snumer, sdenom) else: tex += r"%s / %s" % (snumer, sdenom) elif len(snumer.split()) > ratio*ldenom: # handle long fractions if self._needs_mul_brackets(numer, last=True): tex += r"\frac{1}{%s}%s\left(%s\right)" \ % (sdenom, separator, snumer) elif numer.is_Mul: # split a long numerator a = S.One b = S.One for x in numer.args: if self._needs_mul_brackets(x, last=False) or \ len(convert(a*x).split()) > ratio*ldenom or \ (b.is_commutative is x.is_commutative is False): b *= x else: a *= x if self._needs_mul_brackets(b, last=True): tex += r"\frac{%s}{%s}%s\left(%s\right)" \ % (convert(a), sdenom, separator, convert(b)) else: tex += r"\frac{%s}{%s}%s%s" \ % (convert(a), sdenom, separator, convert(b)) else: tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer) else: 
tex += r"\frac{%s}{%s}" % (snumer, sdenom) return tex def _print_Pow(self, expr): # Treat x**Rational(1,n) as special case if expr.exp.is_Rational and abs(expr.exp.p) == 1 and expr.exp.q != 1: base = self._print(expr.base) expq = expr.exp.q if expq == 2: tex = r"\sqrt{%s}" % base elif self._settings['itex']: tex = r"\root{%d}{%s}" % (expq, base) else: tex = r"\sqrt[%d]{%s}" % (expq, base) if expr.exp.is_negative: return r"\frac{1}{%s}" % tex else: return tex elif self._settings['fold_frac_powers'] \ and expr.exp.is_Rational \ and expr.exp.q != 1: base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q if expr.base.is_Function: return self._print(expr.base, "%s/%s" % (p, q)) if self._needs_brackets(expr.base): return r"\left(%s\right)^{%s/%s}" % (base, p, q) return r"%s^{%s/%s}" % (base, p, q) elif expr.exp.is_Rational and expr.exp.is_negative and expr.base.is_commutative: # Things like 1/x return self._print_Mul(expr) else: if expr.base.is_Function: return self._print(expr.base, self._print(expr.exp)) else: if expr.is_commutative and expr.exp == -1: #solves issue 4129 #As Mul always simplify 1/x to x**-1 #The objective is achieved with this hack #first we get the latex for -1 * expr, #which is a Mul expression tex = self._print(S.NegativeOne * expr).strip() #the result comes with a minus and a space, so we remove if tex[:1] == "-": return tex[1:].strip() if self._needs_brackets(expr.base): tex = r"\left(%s\right)^{%s}" else: tex = r"%s^{%s}" return tex % (self._print(expr.base), self._print(expr.exp)) def _print_Sum(self, expr): if len(expr.limits) == 1: tex = r"\sum_{%s=%s}^{%s} " % \ tuple([ self._print(i) for i in expr.limits[0] ]) else: def _format_ineq(l): return r"%s \leq %s \leq %s" % \ tuple([self._print(s) for s in (l[1], l[0], l[2])]) tex = r"\sum_{\substack{%s}} " % \ str.join('\\\\', [ _format_ineq(l) for l in expr.limits ]) if isinstance(expr.function, Add): tex += r"\left(%s\right)" % self._print(expr.function) else: tex += self._print(expr.function) return tex def _print_Product(self, expr): if len(expr.limits) == 1: tex = r"\prod_{%s=%s}^{%s} " % \ tuple([ self._print(i) for i in expr.limits[0] ]) else: def _format_ineq(l): return r"%s \leq %s \leq %s" % \ tuple([self._print(s) for s in (l[1], l[0], l[2])]) tex = r"\prod_{\substack{%s}} " % \ str.join('\\\\', [ _format_ineq(l) for l in expr.limits ]) if isinstance(expr.function, Add): tex += r"\left(%s\right)" % self._print(expr.function) else: tex += self._print(expr.function) return tex def _print_BasisDependent(self, expr): from sympy.vector import Vector o1 = [] if expr == expr.zero: return expr.zero._latex_form if isinstance(expr, Vector): items = expr.separate().items() else: items = [(0, expr)] for system, vect in items: inneritems = list(vect.components.items()) inneritems.sort(key = lambda x:x[0].__str__()) for k, v in inneritems: if v == 1: o1.append(' + ' + k._latex_form) elif v == -1: o1.append(' - ' + k._latex_form) else: arg_str = '(' + LatexPrinter().doprint(v) + ')' o1.append(' + ' + arg_str + k._latex_form) outstr = (''.join(o1)) if outstr[1] != '-': outstr = outstr[3:] else: outstr = outstr[1:] return outstr def _print_Indexed(self, expr): tex = self._print(expr.base)+'_{%s}' % ','.join( map(self._print, expr.indices)) return tex def _print_IndexedBase(self, expr): return self._print(expr.label) def _print_Derivative(self, expr): dim = len(expr.variables) if requires_partial(expr): diff_symbol = r'\partial' else: diff_symbol = r'd' if dim == 1: tex = r"\frac{%s}{%s %s}" % (diff_symbol, diff_symbol, 
self._print(expr.variables[0]))
        else:
            multiplicity, i, tex = [], 1, ""
            current = expr.variables[0]
            for symbol in expr.variables[1:]:
                if symbol == current:
                    i = i + 1
                else:
                    multiplicity.append((current, i))
                    current, i = symbol, 1
            else:
                multiplicity.append((current, i))

            for x, i in multiplicity:
                if i == 1:
                    tex += r"%s %s" % (diff_symbol, self._print(x))
                else:
                    tex += r"%s %s^{%s}" % (diff_symbol, self._print(x), i)

            tex = r"\frac{%s^{%s}}{%s} " % (diff_symbol, dim, tex)

        if isinstance(expr.expr, AssocOp):
            return r"%s\left(%s\right)" % (tex, self._print(expr.expr))
        else:
            return r"%s %s" % (tex, self._print(expr.expr))

    def _print_Subs(self, subs):
        expr, old, new = subs.args
        latex_expr = self._print(expr)
        latex_old = (self._print(e) for e in old)
        latex_new = (self._print(e) for e in new)
        latex_subs = r'\\ '.join(
            e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
        return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr, latex_subs)

    def _print_Integral(self, expr):
        tex, symbols = "", []

        # Only up to \iiiint exists
        if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
            # Use len(expr.limits)-1 so that syntax highlighters don't think
            # \" is an escaped quote
            tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
            symbols = [r"\, d%s" % self._print(symbol[0])
                       for symbol in expr.limits]

        else:
            for lim in reversed(expr.limits):
                symbol = lim[0]
                tex += r"\int"

                if len(lim) > 1:
                    if self._settings['mode'] in ['equation', 'equation*'] \
                            and not self._settings['itex']:
                        tex += r"\limits"

                    if len(lim) == 3:
                        tex += "_{%s}^{%s}" % (self._print(lim[1]),
                                               self._print(lim[2]))
                    if len(lim) == 2:
                        tex += "^{%s}" % (self._print(lim[1]))

                symbols.insert(0, r"\, d%s" % self._print(symbol))

        return r"%s %s%s" % (tex,
                             str(self._print(expr.function)), "".join(symbols))

    def _print_Limit(self, expr):
        e, z, z0, dir = expr.args

        tex = r"\lim_{%s \to " % self._print(z)
        if z0 in (S.Infinity, S.NegativeInfinity):
            tex += r"%s}" % self._print(z0)
        else:
            tex += r"%s^%s}" % (self._print(z0), self._print(dir))

        if isinstance(e, AssocOp):
            return r"%s\left(%s\right)" % (tex, self._print(e))
        else:
            return r"%s %s" % (tex, self._print(e))

    def _hprint_Function(self, func):
        '''
        Logic to decide how to render a function to latex
          - if it is a recognized latex name, use the appropriate latex command
          - if it is a single letter, just use that letter
          - if it is a longer name, then put \operatorname{} around it and be
            mindful of underscores in the name
        '''
        func = self._deal_with_super_sub(func)

        if func in accepted_latex_functions:
            name = r"\%s" % func
        elif len(func) == 1 or func.startswith('\\'):
            name = func
        else:
            name = r"\operatorname{%s}" % func
        return name

    def _print_Function(self, expr, exp=None):
        '''
        Render functions to LaTeX, handling functions that LaTeX
        knows about e.g., sin, cos, ... by using the proper LaTeX
        command (\sin, \cos, ...).  For single-letter function names,
        render them as regular LaTeX math symbols.  For multi-letter
        function names that LaTeX does not know about, (e.g., Li,
        sech) use \operatorname{} so that the function name is rendered
        in Roman font and LaTeX handles spacing properly.
expr is the expression involving the function exp is an exponent ''' func = expr.func.__name__ if hasattr(self, '_print_' + func): return getattr(self, '_print_' + func)(expr, exp) else: args = [ str(self._print(arg)) for arg in expr.args ] # How inverse trig functions should be displayed, formats are: # abbreviated: asin, full: arcsin, power: sin^-1 inv_trig_style = self._settings['inv_trig_style'] # If we are dealing with a power-style inverse trig function inv_trig_power_case = False # If it is applicable to fold the argument brackets can_fold_brackets = self._settings['fold_func_brackets'] and \ len(args) == 1 and \ not self._needs_function_brackets(expr.args[0]) inv_trig_table = ["asin", "acos", "atan", "acot"] # If the function is an inverse trig function, handle the style if func in inv_trig_table: if inv_trig_style == "abbreviated": func = func elif inv_trig_style == "full": func = "arc" + func[1:] elif inv_trig_style == "power": func = func[1:] inv_trig_power_case = True # Can never fold brackets if we're raised to a power if exp is not None: can_fold_brackets = False if inv_trig_power_case: if func in accepted_latex_functions: name = r"\%s^{-1}" % func else: name = r"\operatorname{%s}^{-1}" % func elif exp is not None: name = r'%s^{%s}' % (self._hprint_Function(func), exp) else: name = self._hprint_Function(func) if can_fold_brackets: if func in accepted_latex_functions: # Wrap argument safely to avoid parse-time conflicts # with the function name itself name += r" {%s}" else: name += r"%s" else: name += r"{\left (%s \right )}" if inv_trig_power_case and exp is not None: name += r"^{%s}" % exp return name % ",".join(args) def _print_UndefinedFunction(self, expr): return self._hprint_Function(str(expr)) def _print_FunctionClass(self, expr): if hasattr(expr, '_latex_no_arg'): return expr._latex_no_arg(self) return self._hprint_Function(str(expr)) def _print_Lambda(self, expr): symbols, expr = expr.args if len(symbols) == 1: symbols = self._print(symbols[0]) else: symbols = self._print(tuple(symbols)) args = (symbols, self._print(expr)) tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr)) return tex def _print_Min(self, expr, exp=None): args = sorted(expr.args, key=default_sort_key) texargs = [r"%s" % self._print(symbol) for symbol in args] tex = r"\min\left(%s\right)" % ", ".join(texargs) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_Max(self, expr, exp=None): args = sorted(expr.args, key=default_sort_key) texargs = [r"%s" % self._print(symbol) for symbol in args] tex = r"\max\left(%s\right)" % ", ".join(texargs) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_floor(self, expr, exp=None): tex = r"\lfloor{%s}\rfloor" % self._print(expr.args[0]) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_ceiling(self, expr, exp=None): tex = r"\lceil{%s}\rceil" % self._print(expr.args[0]) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_Abs(self, expr, exp=None): tex = r"\left|{%s}\right|" % self._print(expr.args[0]) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex _print_Determinant = _print_Abs def _print_re(self, expr, exp=None): if self._needs_brackets(expr.args[0]): tex = r"\Re {\left (%s \right )}" % self._print(expr.args[0]) else: tex = r"\Re{%s}" % self._print(expr.args[0]) return self._do_exponent(tex, exp) def _print_im(self, expr, exp=None): if self._needs_brackets(expr.args[0]): tex = r"\Im {\left ( %s \right )}" % 
self._print(expr.args[0]) else: tex = r"\Im{%s}" % self._print(expr.args[0]) return self._do_exponent(tex, exp) def _print_Not(self, e): from sympy import Equivalent, Implies if isinstance(e.args[0], Equivalent): return self._print_Equivalent(e.args[0], r"\not\equiv") if isinstance(e.args[0], Implies): return self._print_Implies(e.args[0], r"\not\Rightarrow") if (e.args[0].is_Boolean): return r"\neg (%s)" % self._print(e.args[0]) else: return r"\neg %s" % self._print(e.args[0]) def _print_LogOp(self, args, char): arg = args[0] if arg.is_Boolean and not arg.is_Not: tex = r"\left(%s\right)" % self._print(arg) else: tex = r"%s" % self._print(arg) for arg in args[1:]: if arg.is_Boolean and not arg.is_Not: tex += r" %s \left(%s\right)" % (char, self._print(arg)) else: tex += r" %s %s" % (char, self._print(arg)) return tex def _print_And(self, e): args = sorted(e.args, key=default_sort_key) return self._print_LogOp(args, r"\wedge") def _print_Or(self, e): args = sorted(e.args, key=default_sort_key) return self._print_LogOp(args, r"\vee") def _print_Xor(self, e): args = sorted(e.args, key=default_sort_key) return self._print_LogOp(args, r"\veebar") def _print_Implies(self, e, altchar=None): return self._print_LogOp(e.args, altchar or r"\Rightarrow") def _print_Equivalent(self, e, altchar=None): args = sorted(e.args, key=default_sort_key) return self._print_LogOp(args, altchar or r"\equiv") def _print_conjugate(self, expr, exp=None): tex = r"\overline{%s}" % self._print(expr.args[0]) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_polar_lift(self, expr, exp=None): func = r"\operatorname{polar\_lift}" arg = r"{\left (%s \right )}" % self._print(expr.args[0]) if exp is not None: return r"%s^{%s}%s" % (func, exp, arg) else: return r"%s%s" % (func, arg) def _print_ExpBase(self, expr, exp=None): # TODO should exp_polar be printed differently? # what about exp_polar(0), exp_polar(1)? 
tex = r"e^{%s}" % self._print(expr.args[0]) return self._do_exponent(tex, exp) def _print_elliptic_k(self, expr, exp=None): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"K^{%s}%s" % (exp, tex) else: return r"K%s" % tex def _print_elliptic_f(self, expr, exp=None): tex = r"\left(%s\middle| %s\right)" % \ (self._print(expr.args[0]), self._print(expr.args[1])) if exp is not None: return r"F^{%s}%s" % (exp, tex) else: return r"F%s" % tex def _print_elliptic_e(self, expr, exp=None): if len(expr.args) == 2: tex = r"\left(%s\middle| %s\right)" % \ (self._print(expr.args[0]), self._print(expr.args[1])) else: tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"E^{%s}%s" % (exp, tex) else: return r"E%s" % tex def _print_elliptic_pi(self, expr, exp=None): if len(expr.args) == 3: tex = r"\left(%s; %s\middle| %s\right)" % \ (self._print(expr.args[0]), self._print(expr.args[1]), \ self._print(expr.args[2])) else: tex = r"\left(%s\middle| %s\right)" % \ (self._print(expr.args[0]), self._print(expr.args[1])) if exp is not None: return r"\Pi^{%s}%s" % (exp, tex) else: return r"\Pi%s" % tex def _print_gamma(self, expr, exp=None): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"\Gamma^{%s}%s" % (exp, tex) else: return r"\Gamma%s" % tex def _print_uppergamma(self, expr, exp=None): tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]), self._print(expr.args[1])) if exp is not None: return r"\Gamma^{%s}%s" % (exp, tex) else: return r"\Gamma%s" % tex def _print_lowergamma(self, expr, exp=None): tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]), self._print(expr.args[1])) if exp is not None: return r"\gamma^{%s}%s" % (exp, tex) else: return r"\gamma%s" % tex def _print_expint(self, expr, exp=None): tex = r"\left(%s\right)" % self._print(expr.args[1]) nu = self._print(expr.args[0]) if exp is not None: return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex) else: return r"\operatorname{E}_{%s}%s" % (nu, tex) def _print_fresnels(self, expr, exp=None): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"S^{%s}%s" % (exp, tex) else: return r"S%s" % tex def _print_fresnelc(self, expr, exp=None): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"C^{%s}%s" % (exp, tex) else: return r"C%s" % tex def _print_subfactorial(self, expr, exp=None): x = expr.args[0] if self._needs_brackets(x): tex = r"!\left(%s\right)" % self._print(x) else: tex = "!" + self._print(x) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_factorial(self, expr, exp=None): x = expr.args[0] if self._needs_brackets(x): tex = r"\left(%s\right)!" % self._print(x) else: tex = self._print(x) + "!" if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_factorial2(self, expr, exp=None): x = expr.args[0] if self._needs_brackets(x): tex = r"\left(%s\right)!!" % self._print(x) else: tex = self._print(x) + "!!" 
if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_binomial(self, expr, exp=None): tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]), self._print(expr.args[1])) if exp is not None: return r"%s^{%s}" % (tex, exp) else: return tex def _print_RisingFactorial(self, expr, exp=None): n, k = expr.args if self._needs_brackets(n): base = r"\left(%s\right)" % self._print(n) else: base = self._print(n) tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k)) return self._do_exponent(tex, exp) def _print_FallingFactorial(self, expr, exp=None): n, k = expr.args if self._needs_brackets(k): sub = r"\left(%s\right)" % self._print(k) else: sub = self._print(k) tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub) return self._do_exponent(tex, exp) def _hprint_BesselBase(self, expr, exp, sym): tex = r"%s" % (sym) need_exp = False if exp is not None: if tex.find('^') == -1: tex = r"%s^{%s}" % (tex, self._print(exp)) else: need_exp = True tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order), self._print(expr.argument)) if need_exp: tex = self._do_exponent(tex, exp) return tex def _hprint_vec(self, vec): if len(vec) == 0: return "" s = "" for i in vec[:-1]: s += "%s, " % self._print(i) s += self._print(vec[-1]) return s def _print_besselj(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'J') def _print_besseli(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'I') def _print_besselk(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'K') def _print_bessely(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'Y') def _print_yn(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'y') def _print_jn(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'j') def _print_hankel1(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'H^{(1)}') def _print_hankel2(self, expr, exp=None): return self._hprint_BesselBase(expr, exp, 'H^{(2)}') def _hprint_airy(self, expr, exp=None, notation=""): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"%s^{%s}%s" % (notation, exp, tex) else: return r"%s%s" % (notation, tex) def _hprint_airy_prime(self, expr, exp=None, notation=""): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"{%s^\prime}^{%s}%s" % (notation, exp, tex) else: return r"%s^\prime%s" % (notation, tex) def _print_airyai(self, expr, exp=None): return self._hprint_airy(expr, exp, 'Ai') def _print_airybi(self, expr, exp=None): return self._hprint_airy(expr, exp, 'Bi') def _print_airyaiprime(self, expr, exp=None): return self._hprint_airy_prime(expr, exp, 'Ai') def _print_airybiprime(self, expr, exp=None): return self._hprint_airy_prime(expr, exp, 'Bi') def _print_hyper(self, expr, exp=None): tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \ r"\middle| {%s} \right)}" % \ (self._print(len(expr.ap)), self._print(len(expr.bq)), self._hprint_vec(expr.ap), self._hprint_vec(expr.bq), self._print(expr.argument)) if exp is not None: tex = r"{%s}^{%s}" % (tex, self._print(exp)) return tex def _print_meijerg(self, expr, exp=None): tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \ r"%s & %s \end{matrix} \middle| {%s} \right)}" % \ (self._print(len(expr.ap)), self._print(len(expr.bq)), self._print(len(expr.bm)), self._print(len(expr.an)), self._hprint_vec(expr.an), self._hprint_vec(expr.aother), self._hprint_vec(expr.bm), self._hprint_vec(expr.bother), self._print(expr.argument)) if exp is not None: tex 
= r"{%s}^{%s}" % (tex, self._print(exp)) return tex def _print_dirichlet_eta(self, expr, exp=None): tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"\eta^{%s}%s" % (self._print(exp), tex) return r"\eta%s" % tex def _print_zeta(self, expr, exp=None): if len(expr.args) == 2: tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args)) else: tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"\zeta^{%s}%s" % (self._print(exp), tex) return r"\zeta%s" % tex def _print_lerchphi(self, expr, exp=None): tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args)) if exp is None: return r"\Phi%s" % tex return r"\Phi^{%s}%s" % (self._print(exp), tex) def _print_polylog(self, expr, exp=None): s, z = map(self._print, expr.args) tex = r"\left(%s\right)" % z if exp is None: return r"\operatorname{Li}_{%s}%s" % (s, tex) return r"\operatorname{Li}_{%s}^{%s}%s" % (s, self._print(exp), tex) def _print_jacobi(self, expr, exp=None): n, a, b, x = map(self._print, expr.args) tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_gegenbauer(self, expr, exp=None): n, a, x = map(self._print, expr.args) tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_chebyshevt(self, expr, exp=None): n, x = map(self._print, expr.args) tex = r"T_{%s}\left(%s\right)" % (n, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_chebyshevu(self, expr, exp=None): n, x = map(self._print, expr.args) tex = r"U_{%s}\left(%s\right)" % (n, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_legendre(self, expr, exp=None): n, x = map(self._print, expr.args) tex = r"P_{%s}\left(%s\right)" % (n, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_assoc_legendre(self, expr, exp=None): n, a, x = map(self._print, expr.args) tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_hermite(self, expr, exp=None): n, x = map(self._print, expr.args) tex = r"H_{%s}\left(%s\right)" % (n, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_laguerre(self, expr, exp=None): n, x = map(self._print, expr.args) tex = r"L_{%s}\left(%s\right)" % (n, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_assoc_laguerre(self, expr, exp=None): n, a, x = map(self._print, expr.args) tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_Ynm(self, expr, exp=None): n, m, theta, phi = map(self._print, expr.args) tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_Znm(self, expr, exp=None): n, m, theta, phi = map(self._print, expr.args) tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi) if exp is not None: tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp)) return tex def _print_Rational(self, expr): if expr.q != 1: sign = "" p = expr.p if expr.p < 0: sign = "- " p = -p 
return r"%s\frac{%d}{%d}" % (sign, p, expr.q) else: return self._print(expr.p) def _print_Order(self, expr): s = self._print(expr.expr) if expr.point and any(p != S.Zero for p in expr.point) or \ len(expr.variables) > 1: s += '; ' if len(expr.variables) > 1: s += self._print(expr.variables) elif len(expr.variables): s += self._print(expr.variables[0]) s += r'\rightarrow' if len(expr.point) > 1: s += self._print(expr.point) else: s += self._print(expr.point[0]) return r"\mathcal{O}\left(%s\right)" % s def _print_Symbol(self, expr): if expr in self._settings['symbol_names']: return self._settings['symbol_names'][expr] return self._deal_with_super_sub(expr.name) if \ '\\' not in expr.name else expr.name _print_RandomSymbol = _print_Symbol _print_MatrixSymbol = _print_Symbol def _deal_with_super_sub(self, string): name, supers, subs = split_super_sub(string) name = translate(name) supers = [translate(sup) for sup in supers] subs = [translate(sub) for sub in subs] # glue all items together: if len(supers) > 0: name += "^{%s}" % " ".join(supers) if len(subs) > 0: name += "_{%s}" % " ".join(subs) return name def _print_Relational(self, expr): if self._settings['itex']: gt = r"\gt" lt = r"\lt" else: gt = ">" lt = "<" charmap = { "==": "=", ">": gt, "<": lt, ">=": r"\geq", "<=": r"\leq", "!=": r"\neq", } return "%s %s %s" % (self._print(expr.lhs), charmap[expr.rel_op], self._print(expr.rhs)) def _print_Piecewise(self, expr): ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c)) for e, c in expr.args[:-1]] if expr.args[-1].cond == true: ecpairs.append(r"%s & \text{otherwise}" % self._print(expr.args[-1].expr)) else: ecpairs.append(r"%s & \text{for}\: %s" % (self._print(expr.args[-1].expr), self._print(expr.args[-1].cond))) tex = r"\begin{cases} %s \end{cases}" return tex % r" \\".join(ecpairs) def _print_MatrixBase(self, expr): lines = [] for line in range(expr.rows): # horrible, should be 'rows' lines.append(" & ".join([ self._print(i) for i in expr[line, :] ])) mat_str = self._settings['mat_str'] if mat_str is None: if self._settings['mode'] == 'inline': mat_str = 'smallmatrix' else: if (expr.cols <= 10) is True: mat_str = 'matrix' else: mat_str = 'array' out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}' out_str = out_str.replace('%MATSTR%', mat_str) if mat_str == 'array': out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s') if self._settings['mat_delim']: left_delim = self._settings['mat_delim'] right_delim = self._delim_dict[left_delim] out_str = r'\left' + left_delim + out_str + \ r'\right' + right_delim return out_str % r"\\".join(lines) _print_ImmutableMatrix = _print_MatrixBase _print_Matrix = _print_MatrixBase def _print_MatrixElement(self, expr): return self._print(expr.parent) + '_{%s, %s}'%(expr.i, expr.j) def _print_MatrixSlice(self, expr): def latexslice(x): x = list(x) if x[2] == 1: del x[2] if x[1] == x[0] + 1: del x[1] if x[0] == 0: x[0] = '' return ':'.join(map(self._print, x)) return (self._print(expr.parent) + r'\left[' + latexslice(expr.rowslice) + ', ' + latexslice(expr.colslice) + r'\right]') def _print_BlockMatrix(self, expr): return self._print(expr.blocks) def _print_Transpose(self, expr): mat = expr.arg from sympy.matrices import MatrixSymbol if not isinstance(mat, MatrixSymbol): return r"\left(%s\right)^T" % self._print(mat) else: return "%s^T" % self._print(mat) def _print_Adjoint(self, expr): mat = expr.arg from sympy.matrices import MatrixSymbol if not isinstance(mat, MatrixSymbol): return r"\left(%s\right)^\dag" % self._print(mat) else: return 
"%s^\dag" % self._print(mat) def _print_MatAdd(self, expr): terms = list(expr.args) tex = " + ".join(map(self._print, terms)) return tex def _print_MatMul(self, expr): from sympy import Add, MatAdd, HadamardProduct def parens(x): if isinstance(x, (Add, MatAdd, HadamardProduct)): return r"\left(%s\right)" % self._print(x) return self._print(x) return ' '.join(map(parens, expr.args)) def _print_HadamardProduct(self, expr): from sympy import Add, MatAdd, MatMul def parens(x): if isinstance(x, (Add, MatAdd, MatMul)): return r"\left(%s\right)" % self._print(x) return self._print(x) return ' \circ '.join(map(parens, expr.args)) def _print_MatPow(self, expr): base, exp = expr.base, expr.exp from sympy.matrices import MatrixSymbol if not isinstance(base, MatrixSymbol): return r"\left(%s\right)^{%s}" % (self._print(base), self._print(exp)) else: return "%s^{%s}" % (self._print(base), self._print(exp)) def _print_ZeroMatrix(self, Z): return r"\mathbb{0}" def _print_Identity(self, I): return r"\mathbb{I}" def _print_tuple(self, expr): return r"\left ( %s\right )" % \ r", \quad ".join([ self._print(i) for i in expr ]) def _print_Tuple(self, expr): return self._print_tuple(expr) def _print_list(self, expr): return r"\left [ %s\right ]" % \ r", \quad ".join([ self._print(i) for i in expr ]) def _print_dict(self, d): keys = sorted(d.keys(), key=default_sort_key) items = [] for key in keys: val = d[key] items.append("%s : %s" % (self._print(key), self._print(val))) return r"\left \{ %s\right \}" % r", \quad ".join(items) def _print_Dict(self, expr): return self._print_dict(expr) def _print_DiracDelta(self, expr, exp=None): if len(expr.args) == 1 or expr.args[1] == 0: tex = r"\delta\left(%s\right)" % self._print(expr.args[0]) else: tex = r"\delta^{\left( %s \right)}\left( %s \right)" % ( self._print(expr.args[1]), self._print(expr.args[0])) if exp: tex = r"\left(%s\right)^{%s}" % (tex, exp) return tex def _print_Heaviside(self, expr, exp=None): tex = r"\theta\left(%s\right)" % self._print(expr.args[0]) if exp: tex = r"\left(%s\right)^{%s}" % (tex, exp) return tex def _print_KroneckerDelta(self, expr, exp=None): i = self._print(expr.args[0]) j = self._print(expr.args[1]) if expr.args[0].is_Atom and expr.args[1].is_Atom: tex = r'\delta_{%s %s}' % (i, j) else: tex = r'\delta_{%s, %s}' % (i, j) if exp: tex = r'\left(%s\right)^{%s}' % (tex, exp) return tex def _print_LeviCivita(self, expr, exp=None): indices = map(self._print, expr.args) if all(x.is_Atom for x in expr.args): tex = r'\varepsilon_{%s}' % " ".join(indices) else: tex = r'\varepsilon_{%s}' % ", ".join(indices) if exp: tex = r'\left(%s\right)^{%s}' % (tex, exp) return tex def _print_ProductSet(self, p): if len(p.sets) > 1 and not has_variety(p.sets): return self._print(p.sets[0]) + "^%d" % len(p.sets) else: return r" \times ".join(self._print(set) for set in p.sets) def _print_RandomDomain(self, d): try: return 'Domain: ' + self._print(d.as_boolean()) except Exception: try: return ('Domain: ' + self._print(d.symbols) + ' in ' + self._print(d.set)) except: return 'Domain on ' + self._print(d.symbols) def _print_FiniteSet(self, s): items = sorted(s.args, key=default_sort_key) return self._print_set(items) def _print_set(self, s): items = sorted(s, key=default_sort_key) items = ", ".join(map(self._print, items)) return r"\left\{%s\right\}" % items _print_frozenset = _print_set def _print_Range(self, s): if len(s) > 4: it = iter(s) printset = next(it), next(it), '\ldots', s._last_element else: printset = tuple(s) return (r"\left\{" + r", 
".join(self._print(el) for el in printset) + r"\right\}") def _print_SeqFormula(self, s): if s.start is S.NegativeInfinity: stop = s.stop printset = ('\ldots', s.coeff(stop - 3), s.coeff(stop - 2), s.coeff(stop - 1), s.coeff(stop)) elif s.stop is S.Infinity or s.length > 4: printset = s[:4] printset.append('\ldots') else: printset = tuple(s) return (r"\left\[" + r", ".join(self._print(el) for el in printset) + r"\right\]") _print_SeqPer = _print_SeqFormula _print_SeqAdd = _print_SeqFormula _print_SeqMul = _print_SeqFormula def _print_Interval(self, i): if i.start == i.end: return r"\left\{%s\right\}" % self._print(i.start) else: if i.left_open: left = '(' else: left = '[' if i.right_open: right = ')' else: right = ']' return r"\left%s%s, %s\right%s" % \ (left, self._print(i.start), self._print(i.end), right) def _print_Union(self, u): return r" \cup ".join([self._print(i) for i in u.args]) def _print_Complement(self, u): return r" \setminus ".join([self._print(i) for i in u.args]) def _print_Intersection(self, u): return r" \cap ".join([self._print(i) for i in u.args]) def _print_SymmetricDifference(self, u): return r" \triangle ".join([self._print(i) for i in u.args]) def _print_EmptySet(self, e): return r"\emptyset" def _print_Naturals(self, n): return r"\mathbb{N}" def _print_Integers(self, i): return r"\mathbb{Z}" def _print_Reals(self, i): return r"\mathbb{R}" def _print_Complexes(self, i): return r"\mathbb{C}" def _print_ImageSet(self, s): return r"\left\{%s\; |\; %s \in %s\right\}" % ( self._print(s.lamda.expr), ', '.join([self._print(var) for var in s.lamda.variables]), self._print(s.base_set)) def _print_ConditionSet(self, s): vars_print = ', '.join([self._print(var) for var in s.condition.variables]) return r"\left\{%s\; |\; %s \in %s \wedge %s \right\}" % ( vars_print, vars_print, self._print(s.base_set), self._print(s.condition.expr)) def _print_Contains(self, e): return r"%s \in %s" % tuple(self._print(a) for a in e.args) def _print_FourierSeries(self, s): return self._print_Add(s.truncate()) + self._print(' + \ldots') def _print_FormalPowerSeries(self, s): return self._print_Add(s.truncate()) def _print_FormalPowerSeries(self, s): return self._print_Add(s.truncate()) def _print_FiniteField(self, expr): return r"\mathbb{F}_{%s}" % expr.mod def _print_IntegerRing(self, expr): return r"\mathbb{Z}" def _print_RationalField(self, expr): return r"\mathbb{Q}" def _print_RealField(self, expr): return r"\mathbb{R}" def _print_ComplexField(self, expr): return r"\mathbb{C}" def _print_PolynomialRing(self, expr): domain = self._print(expr.domain) symbols = ", ".join(map(self._print, expr.symbols)) return r"%s\left[%s\right]" % (domain, symbols) def _print_FractionField(self, expr): domain = self._print(expr.domain) symbols = ", ".join(map(self._print, expr.symbols)) return r"%s\left(%s\right)" % (domain, symbols) def _print_PolynomialRingBase(self, expr): domain = self._print(expr.domain) symbols = ", ".join(map(self._print, expr.symbols)) inv = "" if not expr.is_Poly: inv = r"S_<^{-1}" return r"%s%s\left[%s\right]" % (inv, domain, symbols) def _print_Poly(self, poly): cls = poly.__class__.__name__ expr = self._print(poly.as_expr()) gens = list(map(self._print, poly.gens)) domain = "domain=%s" % self._print(poly.get_domain()) args = ", ".join([expr] + gens + [domain]) if cls in accepted_latex_functions: tex = r"\%s {\left (%s \right )}" % (cls, args) else: tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args) return tex def _print_RootOf(self, root): cls = root.__class__.__name__ 
expr = self._print(root.expr) index = root.index if cls in accepted_latex_functions: return r"\%s {\left(%s, %d\right)}" % (cls, expr, index) else: return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr, index) def _print_RootSum(self, expr): cls = expr.__class__.__name__ args = [self._print(expr.expr)] if expr.fun is not S.IdentityFunction: args.append(self._print(expr.fun)) if cls in accepted_latex_functions: return r"\%s {\left(%s\right)}" % (cls, ", ".join(args)) else: return r"\operatorname{%s} {\left(%s\right)}" % (cls, ", ".join(args)) def _print_PolyElement(self, poly): mul_symbol = self._settings['mul_symbol_latex'] return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol) def _print_FracElement(self, frac): if frac.denom == 1: return self._print(frac.numer) else: numer = self._print(frac.numer) denom = self._print(frac.denom) return r"\frac{%s}{%s}" % (numer, denom) def _print_euler(self, expr): return r"E_{%s}" % self._print(expr.args[0]) def _print_catalan(self, expr): return r"C_{%s}" % self._print(expr.args[0]) def _print_MellinTransform(self, expr): return r"\mathcal{M}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_InverseMellinTransform(self, expr): return r"\mathcal{M}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_LaplaceTransform(self, expr): return r"\mathcal{L}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_InverseLaplaceTransform(self, expr): return r"\mathcal{L}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_FourierTransform(self, expr): return r"\mathcal{F}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_InverseFourierTransform(self, expr): return r"\mathcal{F}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_SineTransform(self, expr): return r"\mathcal{SIN}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_InverseSineTransform(self, expr): return r"\mathcal{SIN}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_CosineTransform(self, expr): return r"\mathcal{COS}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_InverseCosineTransform(self, expr): return r"\mathcal{COS}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2])) def _print_DMP(self, p): try: if p.ring is not None: # TODO incorporate order return self._print(p.ring.to_sympy(p)) except SympifyError: pass return self._print(repr(p)) def _print_DMF(self, p): return self._print_DMP(p) def _print_Object(self, object): return self._print(Symbol(object.name)) def _print_Morphism(self, morphism): domain = self._print(morphism.domain) codomain = self._print(morphism.codomain) return "%s\\rightarrow %s" % (domain, codomain) def _print_NamedMorphism(self, morphism): pretty_name = self._print(Symbol(morphism.name)) pretty_morphism = self._print_Morphism(morphism) return "%s:%s" % 
(pretty_name, pretty_morphism) def _print_IdentityMorphism(self, morphism): from sympy.categories import NamedMorphism return self._print_NamedMorphism(NamedMorphism( morphism.domain, morphism.codomain, "id")) def _print_CompositeMorphism(self, morphism): # All components of the morphism have names and it is thus # possible to build the name of the composite. component_names_list = [self._print(Symbol(component.name)) for component in morphism.components] component_names_list.reverse() component_names = "\\circ ".join(component_names_list) + ":" pretty_morphism = self._print_Morphism(morphism) return component_names + pretty_morphism def _print_Category(self, morphism): return "\\mathbf{%s}" % self._print(Symbol(morphism.name)) def _print_Diagram(self, diagram): if not diagram.premises: # This is an empty diagram. return self._print(S.EmptySet) latex_result = self._print(diagram.premises) if diagram.conclusions: latex_result += "\\Longrightarrow %s" % \ self._print(diagram.conclusions) return latex_result def _print_DiagramGrid(self, grid): latex_result = "\\begin{array}{%s}\n" % ("c" * grid.width) for i in range(grid.height): for j in range(grid.width): if grid[i, j]: latex_result += latex(grid[i, j]) latex_result += " " if j != grid.width - 1: latex_result += "& " if i != grid.height - 1: latex_result += "\\\\" latex_result += "\n" latex_result += "\\end{array}\n" return latex_result def _print_FreeModule(self, M): return '{%s}^{%s}' % (self._print(M.ring), self._print(M.rank)) def _print_FreeModuleElement(self, m): # Print as row vector for convenience, for now. return r"\left[ %s \right]" % ",".join( '{' + self._print(x) + '}' for x in m) def _print_SubModule(self, m): return r"\left< %s \right>" % ",".join( '{' + self._print(x) + '}' for x in m.gens) def _print_ModuleImplementedIdeal(self, m): return r"\left< %s \right>" % ",".join( '{' + self._print(x) + '}' for [x] in m._module.gens) def _print_QuotientRing(self, R): # TODO nicer fractions for few generators... return r"\frac{%s}{%s}" % (self._print(R.ring), self._print(R.base_ideal)) def _print_QuotientRingElement(self, x): return r"{%s} + {%s}" % (self._print(x.data), self._print(x.ring.base_ideal)) def _print_QuotientModuleElement(self, m): return r"{%s} + {%s}" % (self._print(m.data), self._print(m.module.killed_module)) def _print_QuotientModule(self, M): # TODO nicer fractions for few generators... 
return r"\frac{%s}{%s}" % (self._print(M.base), self._print(M.killed_module)) def _print_MatrixHomomorphism(self, h): return r"{%s} : {%s} \to {%s}" % (self._print(h._sympy_matrix()), self._print(h.domain), self._print(h.codomain)) def _print_BaseScalarField(self, field): string = field._coord_sys._names[field._index] return r'\boldsymbol{\mathrm{%s}}' % self._print(Symbol(string)) def _print_BaseVectorField(self, field): string = field._coord_sys._names[field._index] return r'\partial_{%s}' % self._print(Symbol(string)) def _print_Differential(self, diff): field = diff._form_field if hasattr(field, '_coord_sys'): string = field._coord_sys._names[field._index] return r'\mathrm{d}%s' % self._print(Symbol(string)) else: return 'd(%s)' % self._print(field) string = self._print(field) return r'\mathrm{d}\left(%s\right)' % string def _print_Tr(self, p): #Todo: Handle indices contents = self._print(p.args[0]) return r'\mbox{Tr}\left(%s\right)' % (contents) def _print_totient(self, expr): return r'\phi\left( %s \right)' % self._print(expr.args[0]) def _print_divisor_sigma(self, expr, exp=None): if len(expr.args) == 2: tex = r"_%s\left(%s\right)" % tuple(map(self._print, (expr.args[1], expr.args[0]))) else: tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"\sigma^{%s}%s" % (self._print(exp), tex) return r"\sigma%s" % tex def _print_udivisor_sigma(self, expr, exp=None): if len(expr.args) == 2: tex = r"_%s\left(%s\right)" % tuple(map(self._print, (expr.args[1], expr.args[0]))) else: tex = r"\left(%s\right)" % self._print(expr.args[0]) if exp is not None: return r"\sigma^*^{%s}%s" % (self._print(exp), tex) return r"\sigma^*%s" % tex def translate(s): r''' Check for a modifier ending the string. If present, convert the modifier to latex and translate the rest recursively. Given a description of a Greek letter or other special character, return the appropriate latex. Let everything else pass as given. >>> from sympy.printing.latex import translate >>> translate('alphahatdotprime') "{\\dot{\\hat{\\alpha}}}'" ''' # Process the rest tex = tex_greek_dictionary.get(s) if tex: return tex elif s.lower() in greek_letters_set or s in other_symbols: return "\\" + s else: # Process modifiers, if any, and recurse for key in sorted(modifier_dict.keys(), key=lambda k:len(k), reverse=True): if s.lower().endswith(key) and len(s)>len(key): return modifier_dict[key](translate(s[:-len(key)])) return s def latex(expr, **settings): r""" Convert the given expression to LaTeX representation. >>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational >>> from sympy.abc import x, y, mu, r, tau >>> print(latex((2*tau)**Rational(7,2))) 8 \sqrt{2} \tau^{\frac{7}{2}} order: Any of the supported monomial orderings (currently "lex", "grlex", or "grevlex"), "old", and "none". This parameter does nothing for Mul objects. Setting order to "old" uses the compatibility ordering for Add defined in Printer. For very large expressions, set the 'order' keyword to 'none' if speed is a concern. mode: Specifies how the generated code will be delimited. 'mode' can be one of 'plain', 'inline', 'equation' or 'equation*'. If 'mode' is set to 'plain', then the resulting code will not be delimited at all (this is the default). If 'mode' is set to 'inline' then inline LaTeX $ $ will be used. If 'mode' is set to 'equation' or 'equation*', the resulting code will be enclosed in the 'equation' or 'equation*' environment (remember to import 'amsmath' for 'equation*'), unless the 'itex' option is set. 
In the latter case, the ``$$ $$`` syntax is used. >>> print(latex((2*mu)**Rational(7,2), mode='plain')) 8 \sqrt{2} \mu^{\frac{7}{2}} >>> print(latex((2*tau)**Rational(7,2), mode='inline')) $8 \sqrt{2} \tau^{\frac{7}{2}}$ >>> print(latex((2*mu)**Rational(7,2), mode='equation*')) \begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*} >>> print(latex((2*mu)**Rational(7,2), mode='equation')) \begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation} itex: Specifies if itex-specific syntax is used, including emitting ``$$ $$``. >>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True)) $$8 \sqrt{2} \mu^{\frac{7}{2}}$$ fold_frac_powers: Emit "^{p/q}" instead of "^{\frac{p}{q}}" for fractional powers. >>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True)) 8 \sqrt{2} \tau^{7/2} fold_func_brackets: Fold function brackets where applicable. >>> print(latex((2*tau)**sin(Rational(7,2)))) \left(2 \tau\right)^{\sin{\left (\frac{7}{2} \right )}} >>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets = True)) \left(2 \tau\right)^{\sin {\frac{7}{2}}} fold_short_frac: Emit "p / q" instead of "\frac{p}{q}" when the denominator is simple enough (at most two terms and no powers). The default value is `True` for inline mode, False otherwise. >>> print(latex(3*x**2/y)) \frac{3 x^{2}}{y} >>> print(latex(3*x**2/y, fold_short_frac=True)) 3 x^{2} / y long_frac_ratio: The allowed ratio of the width of the numerator to the width of the denominator before we start breaking off long fractions. The default value is 2. >>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2)) \frac{\int r\, dr}{2 \pi} >>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0)) \frac{1}{2 \pi} \int r\, dr mul_symbol: The symbol to use for multiplication. Can be one of None, "ldot", "dot", or "times". >>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times")) \left(2 \times \tau\right)^{\sin{\left (\frac{7}{2} \right )}} inv_trig_style: How inverse trig functions should be displayed. Can be one of "abbreviated", "full", or "power". Defaults to "abbreviated". >>> print(latex(asin(Rational(7,2)))) \operatorname{asin}{\left (\frac{7}{2} \right )} >>> print(latex(asin(Rational(7,2)), inv_trig_style="full")) \arcsin{\left (\frac{7}{2} \right )} >>> print(latex(asin(Rational(7,2)), inv_trig_style="power")) \sin^{-1}{\left (\frac{7}{2} \right )} mat_str: Which matrix environment string to emit. "smallmatrix", "matrix", "array", etc. Defaults to "smallmatrix" for inline mode, "matrix" for matrices of no more than 10 columns, and "array" otherwise. >>> print(latex(Matrix(2, 1, [x, y]))) \left[\begin{matrix}x\\y\end{matrix}\right] >>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array")) \left[\begin{array}{c}x\\y\end{array}\right] mat_delim: The delimiter to wrap around matrices. Can be one of "[", "(", or the empty string. Defaults to "[". >>> print(latex(Matrix(2, 1, [x, y]), mat_delim="(")) \left(\begin{matrix}x\\y\end{matrix}\right) symbol_names: Dictionary of symbols and the custom strings they should be emitted as. >>> print(latex(x**2, symbol_names={x:'x_i'})) x_i^{2} ``latex`` also supports the builtin container types list, tuple, and dictionary. >>> print(latex([2/x, y], mode='inline')) $\left [ 2 / x, \quad y\right ]$ """ return LatexPrinter(settings).doprint(expr) def print_latex(expr, **settings): """Prints LaTeX representation of the given expression.""" print(latex(expr, **settings))
bsd-3-clause
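A note on the pattern at work in the printer record above: every _print_<ClassName> method is located by reflection on the expression's type. The sketch below reproduces just that dispatch mechanism in a self-contained way; Interval, Union, and MiniLatexPrinter are toy stand-ins of my own, not sympy's classes, and real sympy printers route through a shared Printer.doprint driver.

# Minimal sketch of the _print_<ClassName> dispatch used by the LaTeX printer above.
class Interval(object):
    def __init__(self, start, end):
        self.start, self.end = start, end

class Union(object):
    def __init__(self, *args):
        self.args = args

class MiniLatexPrinter(object):
    def _print(self, expr):
        # Walk the MRO and call the first matching _print_<ClassName>,
        # falling back to str() for unknown leaf objects such as ints.
        for cls in type(expr).__mro__:
            method = getattr(self, '_print_' + cls.__name__, None)
            if method is not None:
                return method(expr)
        return str(expr)

    def _print_Interval(self, i):
        return r"\left[%s, %s\right]" % (self._print(i.start), self._print(i.end))

    def _print_Union(self, u):
        return r" \cup ".join(self._print(a) for a in u.args)

print(MiniLatexPrinter()._print(Union(Interval(0, 1), Interval(2, 3))))
# -> \left[0, 1\right] \cup \left[2, 3\right]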
codecollision/DropboxToFlickr
django/contrib/localflavor/br/forms.py
308
5803
# -*- coding: utf-8 -*- """ BR-specific Form helpers """ from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField, CharField, Select from django.utils.encoding import smart_unicode from django.utils.translation import ugettext_lazy as _ import re phone_digits_re = re.compile(r'^(\d{2})[-\.]?(\d{4})[-\.]?(\d{4})$') class BRZipCodeField(RegexField): default_error_messages = { 'invalid': _('Enter a zip code in the format XXXXX-XXX.'), } def __init__(self, *args, **kwargs): super(BRZipCodeField, self).__init__(r'^\d{5}-\d{3}$', max_length=None, min_length=None, *args, **kwargs) class BRPhoneNumberField(Field): default_error_messages = { 'invalid': _('Phone numbers must be in XX-XXXX-XXXX format.'), } def clean(self, value): super(BRPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' value = re.sub('(\(|\)|\s+)', '', smart_unicode(value)) m = phone_digits_re.search(value) if m: return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3)) raise ValidationError(self.error_messages['invalid']) class BRStateSelect(Select): """ A Select widget that uses a list of Brazilian states/territories as its choices. """ def __init__(self, attrs=None): from br_states import STATE_CHOICES super(BRStateSelect, self).__init__(attrs, choices=STATE_CHOICES) class BRStateChoiceField(Field): """ A choice field that uses a list of Brazilian states as its choices. """ widget = Select default_error_messages = { 'invalid': _(u'Select a valid brazilian state. That state is not one of the available states.'), } def __init__(self, required=True, widget=None, label=None, initial=None, help_text=None): super(BRStateChoiceField, self).__init__(required, widget, label, initial, help_text) from br_states import STATE_CHOICES self.widget.choices = STATE_CHOICES def clean(self, value): value = super(BRStateChoiceField, self).clean(value) if value in EMPTY_VALUES: value = u'' value = smart_unicode(value) if value == u'': return value valid_values = set([smart_unicode(k) for k, v in self.widget.choices]) if value not in valid_values: raise ValidationError(self.error_messages['invalid']) return value def DV_maker(v): if v >= 2: return 11 - v return 0 class BRCPFField(CharField): """ This field validate a CPF number or a CPF string. A CPF number is compounded by XXX.XXX.XXX-VD. The two last digits are check digits. More information: http://en.wikipedia.org/wiki/Cadastro_de_Pessoas_F%C3%ADsicas """ default_error_messages = { 'invalid': _("Invalid CPF number."), 'max_digits': _("This field requires at most 11 digits or 14 characters."), 'digits_only': _("This field requires only numbers."), } def __init__(self, *args, **kwargs): super(BRCPFField, self).__init__(max_length=14, min_length=11, *args, **kwargs) def clean(self, value): """ Value can be either a string in the format XXX.XXX.XXX-XX or an 11-digit number. 
""" value = super(BRCPFField, self).clean(value) if value in EMPTY_VALUES: return u'' orig_value = value[:] if not value.isdigit(): value = re.sub("[-\.]", "", value) try: int(value) except ValueError: raise ValidationError(self.error_messages['digits_only']) if len(value) != 11: raise ValidationError(self.error_messages['max_digits']) orig_dv = value[-2:] new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))]) new_1dv = DV_maker(new_1dv % 11) value = value[:-2] + str(new_1dv) + value[-1] new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))]) new_2dv = DV_maker(new_2dv % 11) value = value[:-1] + str(new_2dv) if value[-2:] != orig_dv: raise ValidationError(self.error_messages['invalid']) return orig_value class BRCNPJField(Field): default_error_messages = { 'invalid': _("Invalid CNPJ number."), 'digits_only': _("This field requires only numbers."), 'max_digits': _("This field requires at least 14 digits"), } def clean(self, value): """ Value can be either a string in the format XX.XXX.XXX/XXXX-XX or a group of 14 characters. """ value = super(BRCNPJField, self).clean(value) if value in EMPTY_VALUES: return u'' orig_value = value[:] if not value.isdigit(): value = re.sub("[-/\.]", "", value) try: int(value) except ValueError: raise ValidationError(self.error_messages['digits_only']) if len(value) != 14: raise ValidationError(self.error_messages['max_digits']) orig_dv = value[-2:] new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(5, 1, -1) + range(9, 1, -1))]) new_1dv = DV_maker(new_1dv % 11) value = value[:-2] + str(new_1dv) + value[-1] new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(6, 1, -1) + range(9, 1, -1))]) new_2dv = DV_maker(new_2dv % 11) value = value[:-1] + str(new_2dv) if value[-2:] != orig_dv: raise ValidationError(self.error_messages['invalid']) return orig_value
bsd-3-clause
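The check-digit arithmetic inside BRCPFField.clean is easier to see in isolation. Below is a sketch of the same modulo-11 rule with hypothetical helper names; note that, like the field above, the bare arithmetic accepts degenerate repeated-digit inputs such as 111.111.111-11.

# Standalone sketch of the CPF modulo-11 check-digit rule used by BRCPFField.
def cpf_check_digit(digits, start_weight):
    # Weighted sum of the digits with descending weights, then mod 11.
    total = sum(w * int(d) for w, d in zip(range(start_weight, 1, -1), digits))
    remainder = total % 11
    return 0 if remainder < 2 else 11 - remainder

def is_valid_cpf(value):
    digits = [c for c in value if c.isdigit()]
    if len(digits) != 11:
        return False
    dv1 = cpf_check_digit(digits[:9], 10)                # first verification digit
    dv2 = cpf_check_digit(digits[:9] + [str(dv1)], 11)   # second one includes dv1
    return digits[-2:] == [str(dv1), str(dv2)]

print(is_valid_cpf("663.256.017-26"))  # True  (check digits 2 and 6)
print(is_valid_cpf("663.256.017-27"))  # False (second digit tampered)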
catapult-project/catapult
third_party/gsutil/gslib/vendored/boto/boto/swf/layer1_decisions.py
153
11938
""" Helper class for creating decision responses. """ class Layer1Decisions(object): """ Use this object to build a list of decisions for a decision response. Each method call will add append a new decision. Retrieve the list of decisions from the _data attribute. """ def __init__(self): self._data = [] def schedule_activity_task(self, activity_id, activity_type_name, activity_type_version, task_list=None, control=None, heartbeat_timeout=None, schedule_to_close_timeout=None, schedule_to_start_timeout=None, start_to_close_timeout=None, input=None): """ Schedules an activity task. :type activity_id: string :param activity_id: The activityId of the type of the activity being scheduled. :type activity_type_name: string :param activity_type_name: The name of the type of the activity being scheduled. :type activity_type_version: string :param activity_type_version: The version of the type of the activity being scheduled. :type task_list: string :param task_list: If set, specifies the name of the task list in which to schedule the activity task. If not specified, the defaultTaskList registered with the activity type will be used. Note: a task list for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default task list was specified at registration time then a fault will be returned. """ o = {} o['decisionType'] = 'ScheduleActivityTask' attrs = o['scheduleActivityTaskDecisionAttributes'] = {} attrs['activityId'] = activity_id attrs['activityType'] = { 'name': activity_type_name, 'version': activity_type_version, } if task_list is not None: attrs['taskList'] = {'name': task_list} if control is not None: attrs['control'] = control if heartbeat_timeout is not None: attrs['heartbeatTimeout'] = heartbeat_timeout if schedule_to_close_timeout is not None: attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout if schedule_to_start_timeout is not None: attrs['scheduleToStartTimeout'] = schedule_to_start_timeout if start_to_close_timeout is not None: attrs['startToCloseTimeout'] = start_to_close_timeout if input is not None: attrs['input'] = input self._data.append(o) def request_cancel_activity_task(self, activity_id): """ Attempts to cancel a previously scheduled activity task. If the activity task was scheduled but has not been assigned to a worker, then it will be canceled. If the activity task was already assigned to a worker, then the worker will be informed that cancellation has been requested in the response to RecordActivityTaskHeartbeat. """ o = {} o['decisionType'] = 'RequestCancelActivityTask' attrs = o['requestCancelActivityTaskDecisionAttributes'] = {} attrs['activityId'] = activity_id self._data.append(o) def record_marker(self, marker_name, details=None): """ Records a MarkerRecorded event in the history. Markers can be used for adding custom information in the history for instance to let deciders know that they do not need to look at the history beyond the marker event. 
""" o = {} o['decisionType'] = 'RecordMarker' attrs = o['recordMarkerDecisionAttributes'] = {} attrs['markerName'] = marker_name if details is not None: attrs['details'] = details self._data.append(o) def complete_workflow_execution(self, result=None): """ Closes the workflow execution and records a WorkflowExecutionCompleted event in the history """ o = {} o['decisionType'] = 'CompleteWorkflowExecution' attrs = o['completeWorkflowExecutionDecisionAttributes'] = {} if result is not None: attrs['result'] = result self._data.append(o) def fail_workflow_execution(self, reason=None, details=None): """ Closes the workflow execution and records a WorkflowExecutionFailed event in the history. """ o = {} o['decisionType'] = 'FailWorkflowExecution' attrs = o['failWorkflowExecutionDecisionAttributes'] = {} if reason is not None: attrs['reason'] = reason if details is not None: attrs['details'] = details self._data.append(o) def cancel_workflow_executions(self, details=None): """ Closes the workflow execution and records a WorkflowExecutionCanceled event in the history. """ o = {} o['decisionType'] = 'CancelWorkflowExecution' attrs = o['cancelWorkflowExecutionsDecisionAttributes'] = {} if details is not None: attrs['details'] = details self._data.append(o) def continue_as_new_workflow_execution(self, child_policy=None, execution_start_to_close_timeout=None, input=None, tag_list=None, task_list=None, start_to_close_timeout=None, workflow_type_version=None): """ Closes the workflow execution and starts a new workflow execution of the same type using the same workflow id and a unique run Id. A WorkflowExecutionContinuedAsNew event is recorded in the history. """ o = {} o['decisionType'] = 'ContinueAsNewWorkflowExecution' attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {} if child_policy is not None: attrs['childPolicy'] = child_policy if execution_start_to_close_timeout is not None: attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout if input is not None: attrs['input'] = input if tag_list is not None: attrs['tagList'] = tag_list if task_list is not None: attrs['taskList'] = {'name': task_list} if start_to_close_timeout is not None: attrs['taskStartToCloseTimeout'] = start_to_close_timeout if workflow_type_version is not None: attrs['workflowTypeVersion'] = workflow_type_version self._data.append(o) def start_timer(self, start_to_fire_timeout, timer_id, control=None): """ Starts a timer for this workflow execution and records a TimerStarted event in the history. This timer will fire after the specified delay and record a TimerFired event. """ o = {} o['decisionType'] = 'StartTimer' attrs = o['startTimerDecisionAttributes'] = {} attrs['startToFireTimeout'] = start_to_fire_timeout attrs['timerId'] = timer_id if control is not None: attrs['control'] = control self._data.append(o) def cancel_timer(self, timer_id): """ Cancels a previously started timer and records a TimerCanceled event in the history. """ o = {} o['decisionType'] = 'CancelTimer' attrs = o['cancelTimerDecisionAttributes'] = {} attrs['timerId'] = timer_id self._data.append(o) def signal_external_workflow_execution(self, workflow_id, signal_name, run_id=None, control=None, input=None): """ Requests a signal to be delivered to the specified external workflow execution and records a SignalExternalWorkflowExecutionInitiated event in the history. 
""" o = {} o['decisionType'] = 'SignalExternalWorkflowExecution' attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {} attrs['workflowId'] = workflow_id attrs['signalName'] = signal_name if run_id is not None: attrs['runId'] = run_id if control is not None: attrs['control'] = control if input is not None: attrs['input'] = input self._data.append(o) def request_cancel_external_workflow_execution(self, workflow_id, control=None, run_id=None): """ Requests that a request be made to cancel the specified external workflow execution and records a RequestCancelExternalWorkflowExecutionInitiated event in the history. """ o = {} o['decisionType'] = 'RequestCancelExternalWorkflowExecution' attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {} attrs['workflowId'] = workflow_id if control is not None: attrs['control'] = control if run_id is not None: attrs['runId'] = run_id self._data.append(o) def start_child_workflow_execution(self, workflow_type_name, workflow_type_version, workflow_id, child_policy=None, control=None, execution_start_to_close_timeout=None, input=None, tag_list=None, task_list=None, task_start_to_close_timeout=None): """ Requests that a child workflow execution be started and records a StartChildWorkflowExecutionInitiated event in the history. The child workflow execution is a separate workflow execution with its own history. """ o = {} o['decisionType'] = 'StartChildWorkflowExecution' attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {} attrs['workflowType'] = { 'name': workflow_type_name, 'version': workflow_type_version, } attrs['workflowId'] = workflow_id if child_policy is not None: attrs['childPolicy'] = child_policy if control is not None: attrs['control'] = control if execution_start_to_close_timeout is not None: attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout if input is not None: attrs['input'] = input if tag_list is not None: attrs['tagList'] = tag_list if task_list is not None: attrs['taskList'] = {'name': task_list} if task_start_to_close_timeout is not None: attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout self._data.append(o)
bsd-3-clause
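For orientation, here is a hypothetical usage of the Layer1Decisions helper above; the activity names and task list are invented. Each call appends one decision dict to _data, which the caller then submits with the decision response. One thing worth double-checking against the SWF API: cancel_workflow_executions stores its attributes under a plural 'cancelWorkflowExecutionsDecisionAttributes' key, which looks inconsistent with the singular naming every other method uses.

# Hypothetical usage of Layer1Decisions above; names are illustrative only.
d = Layer1Decisions()
d.schedule_activity_task('my-activity-1', 'ProcessFile', '1.0',
                         task_list='default', input='{"key": "value"}')
d.start_timer('60', 'retry-timer')  # the timeout is passed as a string of seconds
print(d._data[0]['decisionType'])                             # ScheduleActivityTask
print(d._data[1]['startTimerDecisionAttributes']['timerId'])  # retry-timer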
abzaloid/maps
django-project/lib/python2.7/site-packages/django/core/servers/fastcgi.py
170
6631
""" FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol. Uses the flup python package: http://www.saddi.com/software/flup/ This is an adaptation of the flup package to add FastCGI server support to run Django apps from Web servers that support the FastCGI protocol. This module can be run standalone or from the django-admin / manage.py scripts using the "runfcgi" directive. Run with the extra option "help" for a list of additional options you can pass to this server. """ import importlib import os import sys __version__ = "0.1" __all__ = ["runfastcgi"] FASTCGI_OPTIONS = { 'protocol': 'fcgi', 'host': None, 'port': None, 'socket': None, 'method': 'fork', 'daemonize': None, 'workdir': '/', 'pidfile': None, 'maxspare': 5, 'minspare': 2, 'maxchildren': 50, 'maxrequests': 0, 'debug': None, 'outlog': None, 'errlog': None, 'umask': None, } FASTCGI_HELP = r""" Run this project as a fastcgi (or some other protocol supported by flup) application. To do this, the flup package from http://www.saddi.com/software/flup/ is required. runfcgi [options] [fcgi settings] Optional Fcgi settings: (setting=value) protocol=PROTOCOL fcgi, scgi, ajp, ... (default %(protocol)s) host=HOSTNAME hostname to listen on. port=PORTNUM port to listen on. socket=FILE UNIX socket to listen on. method=IMPL prefork or threaded (default %(method)s). maxrequests=NUMBER number of requests a child handles before it is killed and a new child is forked (0 = no limit). maxspare=NUMBER max number of spare processes / threads (default %(maxspare)s). minspare=NUMBER min number of spare processes / threads (default %(minspare)s). maxchildren=NUMBER hard limit number of processes / threads (default %(maxchildren)s). daemonize=BOOL whether to detach from terminal. pidfile=FILE write the spawned process-id to this file. workdir=DIRECTORY change to this directory when daemonizing (default %(workdir)s). debug=BOOL set to true to enable flup tracebacks. outlog=FILE write stdout to this file. errlog=FILE write stderr to this file. umask=UMASK umask to use when daemonizing, in octal notation (default 022). Examples: Run a "standard" fastcgi process on a file-descriptor (for Web servers which spawn your processes for you) $ manage.py runfcgi method=threaded Run a scgi server on a TCP host/port $ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025 Run a fastcgi server on a UNIX domain socket (posix platforms only) $ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock Run a fastCGI as a daemon and write the spawned PID in a file $ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \ daemonize=true pidfile=/var/run/django-fcgi.pid """ % FASTCGI_OPTIONS def fastcgi_help(message=None): print(FASTCGI_HELP) if message: print(message) return False def runfastcgi(argset=[], **kwargs): options = FASTCGI_OPTIONS.copy() options.update(kwargs) for x in argset: if "=" in x: k, v = x.split('=', 1) else: k, v = x, True options[k.lower()] = v if "help" in options: return fastcgi_help() try: import flup # NOQA except ImportError as e: sys.stderr.write("ERROR: %s\n" % e) sys.stderr.write(" Unable to load the flup package. In order to run django\n") sys.stderr.write(" as a FastCGI application, you will need to get flup from\n") sys.stderr.write(" http://www.saddi.com/software/flup/ If you've already\n") sys.stderr.write(" installed flup, then make sure you have it in your PYTHONPATH.\n") return False flup_module = 'server.' 
+ options['protocol'] if options['method'] in ('prefork', 'fork'): wsgi_opts = { 'maxSpare': int(options["maxspare"]), 'minSpare': int(options["minspare"]), 'maxChildren': int(options["maxchildren"]), 'maxRequests': int(options["maxrequests"]), } flup_module += '_fork' elif options['method'] in ('thread', 'threaded'): wsgi_opts = { 'maxSpare': int(options["maxspare"]), 'minSpare': int(options["minspare"]), 'maxThreads': int(options["maxchildren"]), } else: return fastcgi_help("ERROR: Implementation must be one of prefork or " "thread.") wsgi_opts['debug'] = options['debug'] is not None try: module = importlib.import_module('.%s' % flup_module, 'flup') WSGIServer = module.WSGIServer except Exception: print("Can't import flup." + flup_module) return False # Prep up and go from django.core.servers.basehttp import get_internal_wsgi_application if options["host"] and options["port"] and not options["socket"]: wsgi_opts['bindAddress'] = (options["host"], int(options["port"])) elif options["socket"] and not options["host"] and not options["port"]: wsgi_opts['bindAddress'] = options["socket"] elif not options["socket"] and not options["host"] and not options["port"]: wsgi_opts['bindAddress'] = None else: return fastcgi_help("Invalid combination of host, port, socket.") if options["daemonize"] is None: # Default to daemonizing if we're running on a socket/named pipe. daemonize = (wsgi_opts['bindAddress'] is not None) else: if options["daemonize"].lower() in ('true', 'yes', 't'): daemonize = True elif options["daemonize"].lower() in ('false', 'no', 'f'): daemonize = False else: return fastcgi_help("ERROR: Invalid option for daemonize " "parameter.") daemon_kwargs = {} if options['outlog']: daemon_kwargs['out_log'] = options['outlog'] if options['errlog']: daemon_kwargs['err_log'] = options['errlog'] if options['umask']: daemon_kwargs['umask'] = int(options['umask'], 8) if daemonize: from django.utils.daemonize import become_daemon become_daemon(our_home_dir=options["workdir"], **daemon_kwargs) if options["pidfile"]: with open(options["pidfile"], "w") as fp: fp.write("%d\n" % os.getpid()) WSGIServer(get_internal_wsgi_application(), **wsgi_opts).run() if __name__ == '__main__': runfastcgi(sys.argv[1:])
mit
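The runfcgi entry point accepts flup settings as positional key=value tokens rather than --flags. A minimal reproduction of just that parsing convention follows; parse_argset is an invented name for this sketch.

# Sketch of the "key=value" argset convention used by runfastcgi above.
def parse_argset(argset, defaults):
    options = dict(defaults)
    for token in argset:
        key, value = token.split('=', 1) if '=' in token else (token, True)
        options[key.lower()] = value
    return options

opts = parse_argset(['method=prefork', 'host=127.0.0.1', 'port=8025'],
                    {'protocol': 'fcgi', 'method': 'fork'})
print(opts['method'], opts['host'], opts['port'])  # prefork 127.0.0.1 8025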
sajuptpm/murano
murano/dsl/exceptions.py
1
3632
# Copyright (c) 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class InternalFlowException(Exception): pass class ReturnException(InternalFlowException): def __init__(self, value): self._value = value @property def value(self): return self._value class BreakException(InternalFlowException): pass class ContinueException(InternalFlowException): pass class DslInvalidOperationError(Exception): pass class NoMethodFound(Exception): def __init__(self, name): super(NoMethodFound, self).__init__('Method "%s" is not found' % name) class NoClassFound(Exception): def __init__(self, name): super(NoClassFound, self).__init__('Class "%s" is not found' % name) class NoPackageFound(Exception): def __init__(self, name): super(NoPackageFound, self).__init__( 'Package "%s" is not found' % name) class NoPackageForClassFound(Exception): def __init__(self, name): super(NoPackageForClassFound, self).__init__('Package for class "%s" ' 'is not found' % name) class NoObjectFoundError(Exception): def __init__(self, object_id): super(NoObjectFoundError, self).__init__( 'Object "%s" is not found in object store' % object_id) class AmbiguousMethodName(Exception): def __init__(self, name): super(AmbiguousMethodName, self).__init__( 'Found more than one method "%s"' % name) class DslContractSyntaxError(Exception): pass class ContractViolationException(Exception): pass class ValueIsMissingError(Exception): pass class DslSyntaxError(Exception): pass class PropertyAccessError(Exception): pass class AmbiguousPropertyNameError(PropertyAccessError): def __init__(self, name): super(AmbiguousPropertyNameError, self).__init__( 'Found more than one property "%s"' % name) class NoWriteAccess(PropertyAccessError): def __init__(self, name): super(NoWriteAccess, self).__init__( 'Property "%s" is immutable to the caller' % name) class NoWriteAccessError(PropertyAccessError): def __init__(self, name): super(NoWriteAccessError, self).__init__( 'Property "%s" is immutable to the caller' % name) class PropertyReadError(PropertyAccessError): def __init__(self, name, murano_class): super(PropertyReadError, self).__init__( 'Property "%s" in class "%s" cannot be read' % (name, murano_class.name)) class PropertyWriteError(PropertyAccessError): def __init__(self, name, murano_class): super(PropertyWriteError, self).__init__( 'Property "%s" in class "%s" cannot be written' % (name, murano_class.name)) class UninitializedPropertyAccessError(PropertyAccessError): def __init__(self, name, murano_class): super(UninitializedPropertyAccessError, self).__init__( 'Access to uninitialized property ' '"%s" in class "%s" is forbidden' % (name, murano_class.name))
apache-2.0
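The InternalFlowException family above implements interpreter control flow with exceptions: evaluating a Return statement raises, and the method evaluator catches. A self-contained sketch of that consumption pattern; run_method and the statement callables are hypothetical, and ReturnException is copied locally so the snippet runs on its own.

# How interpreter code typically consumes ReturnException-style control flow.
class ReturnException(Exception):  # local copy of the class above
    def __init__(self, value):
        super(ReturnException, self).__init__()
        self._value = value

    @property
    def value(self):
        return self._value

def run_method(statements):
    # Evaluate statements in order; a Return statement unwinds the stack by
    # raising, and we translate that back into a plain return value.
    try:
        for statement in statements:
            statement()
    except ReturnException as e:
        return e.value
    return None

def return_42():
    raise ReturnException(42)

print(run_method([lambda: None, return_42]))  # 42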
rafafigueroa/amrws
src/amrpkg/scripts/virtual_tracking.py
1
5059
#!/usr/bin/env python # -*- coding: utf-8 -*- import rospy from nav_msgs.msg import Odometry from geometry_msgs.msg import Twist from geometry_msgs.msg import PoseStamped from tf.transformations import euler_from_quaternion import numpy as np def minAngle(ang): return np.arctan2(np.sin(ang), np.cos(ang)) def orientation_to_yaw(orientation): quat_list = [0, 0, 0, 0] quat_list[0] = orientation.x quat_list[1] = orientation.y quat_list[2] = orientation.z quat_list[3] = orientation.w (roll, pitch, yaw) = euler_from_quaternion(quat_list) return yaw class SimModel(object): """Provides a consistent usage of simulation models""" def __init__(self, control): self.control = control class SimMain(object): def __init__(self, model, hz = 50.0): self.state = [None]*6 self.model = model rospy.init_node('amr_control') self.rate = rospy.Rate(hz) self.pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=10) rospy.Subscriber('/odom', Odometry, self.state_callback) rospy.Subscriber('/virtual_agent/pose', PoseStamped, self.virtual_state_callback) print("simulation initialized") #TODO: Wait for topic def state_callback(self, robot_od): x = robot_od.pose.pose.position.x y = robot_od.pose.pose.position.y h = orientation_to_yaw(robot_od.pose.pose.orientation) #print 'state', x, y , h self.state[0] = x self.state[1] = y self.state[2] = h def virtual_state_callback(self, robot_ps): xr = robot_ps.pose.position.x yr = robot_ps.pose.position.y hr = orientation_to_yaw(robot_ps.pose.orientation) self.state[3] = xr self.state[4] = yr self.state[5] = hr #print 'virtual', xr, yr, hr def run(self): print("simulation running") while not rospy.is_shutdown(): if (self.state[0] is not None) and \ (self.state[3] is not None): u = self.model.control(self.state) v = u[0] w = u[1] robot_tw = Twist() robot_tw.linear.x = v robot_tw.angular.z = w self.pub.publish(robot_tw) self.rate.sleep() #TODO: Make general, currently copy/paste from virtual vr = 0.5 wr = -0.05 def control_virtual_linear(X): x = X[0] y = X[1] h = X[2] xr = X[3] yr = X[4] hr = X[5] # (s+2*xi*alpha)*(s**2 + 2*xi*alpha*s + alpha**2) # poles at -p1r +- p1i*j and -p2 # (s+p2)*(s+p1r+p1i)*(s+p1r-p1i) # (s+p2)*(s**2 + 2*p1r*s + p1r**2 + p1i**2) # 2*xi*alpha = p2 # 2*xi*alpha = 2*p1r # alpha**2 = p1r**2 + p1i**2 # for p1r = 1, p1i = 1, p2 = 2*p1r = 2 # Linear p1r = 1 p1i = 0.3 p2 = 2*p1r alpha = np.sqrt(p1r**2+p1i**2) xi = p2/float(2*alpha) b = (alpha**2 - wr**2)/float(vr**2) k1 = 2*xi*alpha k2 = b * np.abs(vr) k3 = k1 ex = xr - x ey = yr - y eh = minAngle(hr - h) e1 = ex*np.cos(h) + ey*np.sin(h) e2 = -ex*np.sin(h) + ey*np.cos(h) e3 = eh u1 = -k1 * e1 u2 = -k2 * np.sign(vr) * e2 - k3 * e3 v = vr * np.cos(e3) - u1 w = wr - u2 Erms = np.sqrt(e1**2 + e2**2 + e3**2) print 'Erms:', Erms, 'v', v, 'w', w return [v, w] def control_virtual_nonlinear(X): x = X[0] y = X[1] h = X[2] xr = X[3] yr = X[4] hr = X[5] # (s+2*xi*alpha)*(s**2 + 2*xi*alpha*s + alpha**2) # poles at -p1r +- p1i*j and -p2 # (s+p2)*(s+p1r+p1i)*(s+p1r-p1i) # (s+p2)*(s**2 + 2*p1r*s + p1r**2 + p1i**2) # 2*xi*alpha = p2 # 2*xi*alpha = 2*p1r # alpha**2 = p1r**2 + p1i**2 # for p1r = 1, p1i = 1, p2 = 2*p1r = 2 # Nonlinear p1r = 0.5 p1i = 0.1 p2 = 2*p1r alpha = np.sqrt(p1r**2+p1i**2) xi = p2/float(2*alpha) b = (alpha**2 - wr**2)/float(vr**2) k1 = 2*xi*np.sqrt(wr**2 + b*vr**2) k2 = b * np.abs(vr) k3 = k1 k4 = b ex = xr - x ey = yr - y eh = minAngle(hr - h) e1 = ex*np.cos(h) + ey*np.sin(h) e2 = -ex*np.sin(h) + ey*np.cos(h) e3 = eh u1 = -k1 * e1 u2 = -k4 * vr * np.sin(e3)/(e3+0.001) * e2 - k3 * e3 v = vr * 
np.cos(e3) - u1 w = wr - u2 Erms = np.sqrt(e1**2 + e2**2 + e3**2) print 'Erms:', Erms, 'v', v, 'w', w return [v, w] if __name__ == '__main__': sim_model = SimModel(control = control_virtual_nonlinear) sim = SimMain(sim_model) try: sim.run() except rospy.ROSInterruptException: pass
mit
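Both controllers above start by rotating the world-frame tracking error into the robot's body frame before applying gains. Here is that transform in isolation with made-up poses; body_frame_error is an invented helper name.

import numpy as np

def body_frame_error(x, y, h, xr, yr, hr):
    # Rotate the world-frame position error into the robot frame and wrap
    # the heading error to (-pi, pi], mirroring minAngle above.
    ex, ey = xr - x, yr - y
    e1 = ex * np.cos(h) + ey * np.sin(h)     # along-track error
    e2 = -ex * np.sin(h) + ey * np.cos(h)    # cross-track error
    e3 = np.arctan2(np.sin(hr - h), np.cos(hr - h))  # heading error
    return e1, e2, e3

print(body_frame_error(0.0, 0.0, 0.0, 1.0, 1.0, np.pi / 4))
# (1.0, 1.0, 0.7853...) -- robot at the origin facing +x, reference ahead-left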
knehez/edx-platform
lms/djangoapps/shoppingcart/urls.py
159
1390
from django.conf.urls import patterns, url from django.conf import settings urlpatterns = patterns( 'shoppingcart.views', url(r'^postpay_callback/$', 'postpay_callback'), # Both the ~accept and ~reject callback pages are handled here url(r'^receipt/(?P<ordernum>[0-9]*)/$', 'show_receipt'), url(r'^donation/$', 'donate', name='donation'), url(r'^csv_report/$', 'csv_report', name='payment_csv_report'), # These following URLs are only valid if the ENABLE_SHOPPING_CART feature flag is set url(r'^$', 'show_cart'), url(r'^clear/$', 'clear_cart'), url(r'^remove_item/$', 'remove_item'), url(r'^add/course/{}/$'.format(settings.COURSE_ID_PATTERN), 'add_course_to_cart', name='add_course_to_cart'), url(r'^register/redeem/(?P<registration_code>[0-9A-Za-z]+)/$', 'register_code_redemption', name='register_code_redemption'), url(r'^use_code/$', 'use_code'), url(r'^update_user_cart/$', 'update_user_cart'), url(r'^reset_code_redemption/$', 'reset_code_redemption'), url(r'^billing_details/$', 'billing_details', name='billing_details'), url(r'^verify_cart/$', 'verify_cart'), ) if settings.FEATURES.get('ENABLE_PAYMENT_FAKE'): from shoppingcart.tests.payment_fake import PaymentFakeView urlpatterns += patterns( 'shoppingcart.tests.payment_fake', url(r'^payment_fake', PaymentFakeView.as_view()), )
agpl-3.0
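The registration-code route above relies on a named capture group; a quick standalone check of what that pattern does and does not match:

import re

# Same capture group as the register_code_redemption route above.
pattern = re.compile(r'^register/redeem/(?P<registration_code>[0-9A-Za-z]+)/$')
match = pattern.match('register/redeem/ABC123/')
print(match.group('registration_code'))          # ABC123
print(pattern.match('register/redeem/not/ok/'))  # None: slashes are excluded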
cxysteven/Paddle
python/paddle/v2/dataset/imikolov.py
1
3909
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ imikolov's simple dataset. This module will download dataset from http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set into paddle reader creators. """ import paddle.v2.dataset.common import collections import tarfile __all__ = ['train', 'test', 'build_dict'] URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' MD5 = '30177ea32e27c525793142b6bf2c8e2d' def word_count(f, word_freq=None): if word_freq is None: word_freq = collections.defaultdict(int) for l in f: for w in l.strip().split(): word_freq[w] += 1 word_freq['<s>'] += 1 word_freq['<e>'] += 1 return word_freq def build_dict(): """ Build a word dictionary from the corpus, Keys of the dictionary are words, and values are zero-based IDs of these words. """ train_filename = './simple-examples/data/ptb.train.txt' test_filename = './simple-examples/data/ptb.valid.txt' with tarfile.open( paddle.v2.dataset.common.download( paddle.v2.dataset.imikolov.URL, 'imikolov', paddle.v2.dataset.imikolov.MD5)) as tf: trainf = tf.extractfile(train_filename) testf = tf.extractfile(test_filename) word_freq = word_count(testf, word_count(trainf)) if '<unk>' in word_freq: # remove <unk> for now, since we will set it as last index del word_freq['<unk>'] TYPO_FREQ = 50 word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items()) word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*word_freq_sorted)) word_idx = dict(zip(words, xrange(len(words)))) word_idx['<unk>'] = len(words) return word_idx def reader_creator(filename, word_idx, n): def reader(): with tarfile.open( paddle.v2.dataset.common.download( paddle.v2.dataset.imikolov.URL, 'imikolov', paddle.v2.dataset.imikolov.MD5)) as tf: f = tf.extractfile(filename) UNK = word_idx['<unk>'] for l in f: l = ['<s>'] + l.strip().split() + ['<e>'] if len(l) >= n: l = [word_idx.get(w, UNK) for w in l] for i in range(n, len(l) + 1): yield tuple(l[i - n:i]) return reader def train(word_idx, n): """ imikolov training set creator. It returns a reader creator, each sample in the reader is a word ID tuple. :param word_idx: word dictionary :type word_idx: dict :param n: sliding window size :type n: int :return: Training reader creator :rtype: callable """ return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n) def test(word_idx, n): """ imikolov test set creator. It returns a reader creator, each sample in the reader is a word ID tuple. :param word_idx: word dictionary :type word_idx: dict :param n: sliding window size :type n: int :return: Test reader creator :rtype: callable """ return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n) def fetch(): paddle.v2.dataset.common.download(URL, "imikolov", MD5)
apache-2.0
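reader_creator above yields every n-token window over each sentence after padding with <s>/<e> and mapping out-of-vocabulary words to <unk>. The same windowing, reduced to a toy vocabulary:

# Toy reproduction of the sliding n-gram windowing in reader_creator above.
def ngrams(tokens, word_idx, n, unk_id):
    ids = [word_idx.get(w, unk_id) for w in tokens]
    for i in range(n, len(ids) + 1):
        yield tuple(ids[i - n:i])

word_idx = {'<s>': 0, '<e>': 1, 'the': 2, 'cat': 3}
sentence = ['<s>', 'the', 'cat', 'sat', '<e>']  # 'sat' is out of vocabulary
print(list(ngrams(sentence, word_idx, n=3, unk_id=4)))
# [(0, 2, 3), (2, 3, 4), (3, 4, 1)]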
peterfpeterson/mantid
scripts/Muon/GUI/Common/calculate_pair_and_group.py
3
5823
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + import Muon.GUI.Common.utilities.algorithm_utils as algorithm_utils from Muon.GUI.Common.utilities.run_string_utils import run_list_to_string from Muon.GUI.Common.muon_pair import MuonPair from typing import Iterable def calculate_group_data(context, group, run, rebin, workspace_name, periods): processed_data = get_pre_process_workspace_name(run, context.data_context.instrument) params = _get_MuonGroupingCounts_parameters(group, periods) params["InputWorkspace"] = processed_data group_data = algorithm_utils.run_MuonGroupingCounts(params, workspace_name) return group_data def calculate_pair_data(pair: MuonPair, forward_group: str, backward_group: str, output_workspace_name: str): params = _get_MuonPairingAsymmetry_parameters(pair, forward_group, backward_group) pair_data = algorithm_utils.run_MuonPairingAsymmetry(params, output_workspace_name) return pair_data def estimate_group_asymmetry_data(context, group, run, rebin, workspace_name, unormalised_workspace_name, periods): processed_data = get_pre_process_workspace_name(run, context.data_context.instrument) params = _get_MuonGroupingAsymmetry_parameters(context, group, run, periods) params["InputWorkspace"] = processed_data group_asymmetry, group_asymmetry_unnorm = algorithm_utils.run_MuonGroupingAsymmetry(params, workspace_name, unormalised_workspace_name) return group_asymmetry, group_asymmetry_unnorm def run_pre_processing(context, run, rebin): params = _get_pre_processing_params(context, run, rebin) params["InputWorkspace"] = context.data_context.loaded_workspace_as_group(run) processed_data = algorithm_utils.run_MuonPreProcess(params) return processed_data def get_pre_process_workspace_name(run: Iterable[int], instrument: str) -> str: workspace_name = "".join(["__", instrument, run_list_to_string(run), "_pre_processed_data"]) return workspace_name def _get_pre_processing_params(context, run, rebin): pre_process_params = {} try: if context.gui_context['FirstGoodDataFromFile']: time_min = context.data_context.get_loaded_data_for_run(run)["FirstGoodData"] else: time_min = context.gui_context['FirstGoodData'] pre_process_params["TimeMin"] = time_min except KeyError: pass try: if context.gui_context['TimeZeroFromFile']: time_offset = 0.0 else: time_offset = context.data_context.get_loaded_data_for_run(run)["TimeZero"] - context.gui_context[ 'TimeZero'] pre_process_params["TimeOffset"] = time_offset except KeyError: pass if rebin: _setup_rebin_options(context, pre_process_params, run) try: dead_time_table = context.corrections_context.current_dead_time_table_name_for_run( context.data_context.instrument, run) if dead_time_table is not None: pre_process_params["DeadTimeTable"] = dead_time_table except KeyError: pass pre_process_params["OutputWorkspace"] = get_pre_process_workspace_name(run, context.data_context.instrument) return pre_process_params def _setup_rebin_options(context, pre_process_params, run): try: if context.gui_context['RebinType'] == 'Variable' and context.gui_context["RebinVariable"]: pre_process_params["RebinArgs"] = context.gui_context["RebinVariable"] except KeyError: pass try: if context.gui_context['RebinType'] == 'Fixed' and context.gui_context["RebinFixed"]: x_data = 
context.data_context._loaded_data.get_data(run=run, instrument=context.data_context.instrument )['workspace']['OutputWorkspace'][0].workspace.dataX(0) original_step = x_data[1] - x_data[0] pre_process_params["RebinArgs"] = float(context.gui_context["RebinFixed"]) * original_step except KeyError: pass def _get_MuonGroupingCounts_parameters(group, periods): params = {} params["SummedPeriods"] = periods if group: params["GroupName"] = group.name params["Grouping"] = ",".join([str(i) for i in group.detectors]) return params def _get_MuonGroupingAsymmetry_parameters(context, group, run, periods): params = {} if 'GroupRangeMin' in context.gui_context: params['AsymmetryTimeMin'] = context.gui_context['GroupRangeMin'] else: params['AsymmetryTimeMin'] = context.data_context.get_loaded_data_for_run(run)["FirstGoodData"] if 'GroupRangeMax' in context.gui_context: params['AsymmetryTimeMax'] = context.gui_context['GroupRangeMax'] else: params['AsymmetryTimeMax'] = max( context.data_context.get_loaded_data_for_run(run)['OutputWorkspace'][0].workspace.dataX(0)) params["SummedPeriods"] = periods if group: params["GroupName"] = group.name params["Grouping"] = ",".join([str(i) for i in group.detectors]) return params def _get_MuonPairingAsymmetry_parameters(pair: MuonPair, forward_group: str, backward_group: str): params = {} if pair: params["SpecifyGroupsManually"] = False params["PairName"] = pair.name params["InputWorkspace1"] = forward_group params["InputWorkspace2"] = backward_group params["Alpha"] = str(pair.alpha) return params
gpl-3.0
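The workspace naming convention in get_pre_process_workspace_name is worth seeing concretely. This sketch substitutes a simplified stand-in for run_list_to_string; the real Muon utility also collapses consecutive runs into ranges such as '62260-62262'.

def run_list_to_string(runs):
    # Simplified stand-in; the real utility formats run ranges.
    return '-'.join(str(r) for r in runs)

def get_pre_process_workspace_name(run, instrument):
    return ''.join(['__', instrument, run_list_to_string(run),
                    '_pre_processed_data'])

print(get_pre_process_workspace_name([62260], 'MUSR'))
# __MUSR62260_pre_processed_data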
riteshshrv/django
django/http/__init__.py
341
1103
from django.http.cookie import SimpleCookie, parse_cookie from django.http.request import ( HttpRequest, QueryDict, RawPostDataException, UnreadablePostError, ) from django.http.response import ( BadHeaderError, FileResponse, Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseGone, HttpResponseNotAllowed, HttpResponseNotFound, HttpResponseNotModified, HttpResponsePermanentRedirect, HttpResponseRedirect, HttpResponseServerError, JsonResponse, StreamingHttpResponse, ) from django.http.utils import conditional_content_removal __all__ = [ 'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict', 'RawPostDataException', 'UnreadablePostError', 'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect', 'HttpResponsePermanentRedirect', 'HttpResponseNotModified', 'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound', 'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError', 'Http404', 'BadHeaderError', 'JsonResponse', 'FileResponse', 'conditional_content_removal', ]
bsd-3-clause
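django/http/__init__.py is a pure facade module: it re-exports names from submodules and pins the public surface with __all__. A self-contained demonstration of how __all__ gates star-imports, using a throwaway module built at runtime so no package is needed on disk:

import sys
import types

# Build a disposable module for the demo.
mod = types.ModuleType('facade_demo')
mod.public = lambda: 'ok'
mod._private = lambda: 'hidden'
mod.__all__ = ['public']        # only this name escapes a star-import
sys.modules['facade_demo'] = mod

namespace = {}
exec('from facade_demo import *', namespace)
print('public' in namespace, '_private' in namespace)  # True False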
angdraug/nova
nova/tests/objects/test_instance_info_cache.py
32
4744
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.cells import opts as cells_opts from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception from nova.network import model as network_model from nova.objects import instance_info_cache from nova.tests.objects import test_objects fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': 'fake-uuid', 'network_info': '[]', } class _TestInstanceInfoCacheObject(object): def test_get_by_instance_uuid(self): nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}]) self.mox.StubOutWithMock(db, 'instance_info_cache_get') db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn( dict(fake_info_cache, network_info=nwinfo.json())) self.mox.ReplayAll() obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid( self.context, 'fake-uuid') self.assertEqual(obj.instance_uuid, 'fake-uuid') self.assertEqual(obj.network_info, nwinfo) self.assertRemotes() def test_get_by_instance_uuid_no_entries(self): self.mox.StubOutWithMock(db, 'instance_info_cache_get') db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(None) self.mox.ReplayAll() self.assertRaises( exception.InstanceInfoCacheNotFound, instance_info_cache.InstanceInfoCache.get_by_instance_uuid, self.context, 'fake-uuid') def test_new(self): obj = instance_info_cache.InstanceInfoCache.new(self.context, 'fake-uuid') self.assertEqual(set(['instance_uuid', 'network_info']), obj.obj_what_changed()) self.assertEqual('fake-uuid', obj.instance_uuid) self.assertIsNone(obj.network_info) def _save_helper(self, cell_type, update_cells): obj = instance_info_cache.InstanceInfoCache() cells_api = cells_rpcapi.CellsAPI() self.mox.StubOutWithMock(db, 'instance_info_cache_update') self.mox.StubOutWithMock(cells_opts, 'get_cell_type') self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI', use_mock_anything=True) self.mox.StubOutWithMock(cells_api, 'instance_info_cache_update_at_top') nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}]) db.instance_info_cache_update( self.context, 'fake-uuid', {'network_info': nwinfo.json()}).AndReturn('foo') if update_cells: cells_opts.get_cell_type().AndReturn(cell_type) if cell_type == 'compute': cells_rpcapi.CellsAPI().AndReturn(cells_api) cells_api.instance_info_cache_update_at_top( self.context, 'foo') self.mox.ReplayAll() obj._context = self.context obj.instance_uuid = 'fake-uuid' obj.network_info = nwinfo obj.save(update_cells=update_cells) def test_save_with_update_cells_and_compute_cell(self): self._save_helper('compute', True) def test_save_with_update_cells_and_non_compute_cell(self): self._save_helper(None, True) def test_save_without_update_cells(self): self._save_helper(None, False) def test_refresh(self): obj = instance_info_cache.InstanceInfoCache.new(self.context, 'fake-uuid1') self.mox.StubOutWithMock(db, 'instance_info_cache_get') db.instance_info_cache_get(self.context, 'fake-uuid1').AndReturn( fake_info_cache) 
self.mox.ReplayAll() obj.refresh() self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid) class TestInstanceInfoCacheObject(test_objects._LocalTest, _TestInstanceInfoCacheObject): pass class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest, _TestInstanceInfoCacheObject): pass
apache-2.0
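The tests above use mox's record/replay style (StubOutWithMock, AndReturn, ReplayAll). For comparison, the same stub-and-verify flow with the stdlib's unittest.mock, shown against a stand-in db object rather than nova's real module:

from unittest import mock

fake_info_cache = {'instance_uuid': 'fake-uuid', 'network_info': '[]'}

db = mock.Mock()  # stand-in for nova.db
db.instance_info_cache_get.return_value = fake_info_cache

result = db.instance_info_cache_get('ctxt', 'fake-uuid')
assert result['instance_uuid'] == 'fake-uuid'
# mock verifies interactions after the fact instead of replaying a recording.
db.instance_info_cache_get.assert_called_once_with('ctxt', 'fake-uuid')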
apt-helion/viperidae
data/models.py
1
2405
import datetime from peewee import * from pymongo import MongoClient from .config import Config database = Config.DATABASE # monkey patch the DateTimeField to add support for the isoformat which is what # peewee exports as from DataSet DateTimeField.formats.append('%Y-%m-%dT%H:%M:%S') DateField.formats.append('%Y-%m-%dT%H:%M:%S') class UnknownField(object): def __init__(self, *_, **__): pass class BaseModel(Model): # Example usage # doc = AdminDocument.create() # doc.apply_request(request.form) # doc.apply_request(request.json) # doc.apply_request(request.json, required=['filename'], dates=['uploaddate']) def apply_request(self, source, ignore=None, required=None, dates=None): ignore = ignore or [] required = required or [] dates = dates or [] for field in self._meta.get_sorted_fields(): data = source.get(field) if field == "id": continue if field in ignore: continue # Reject missing values for required fields if field in required and data is None: return {'error': 'Empty required field'} if field in dates: data = "" # TODO: parse date strings (e.g. datetime.strptime); date fields are skipped until then if data is None or data == "": continue self.__dict__[field] = data return "" class Meta: database = database class User(BaseModel): user = CharField(column_name='user_id', null=False, primary_key=True) username = CharField(column_name='username', null=False) password = CharField(column_name='password', null=False) email = CharField(column_name='email', null=False) class Meta: table_name = 'Users' class Client(BaseModel): client = CharField(column_name='client_id', null=False, primary_key=True) secret = CharField(column_name='client_secret', null=False) name = CharField(column_name='name', null=False) website = CharField(column_name='website', null=False) description = CharField(column_name='description', null=False) user = ForeignKeyField( column_name='user_id', field='user', model=User, null=False) @classmethod def get_pages(cls, name): mongo_client = MongoClient('localhost', 27017) database = mongo_client.pages collection = database[name] pages = [] for page in collection.find(): pages.append(page) return pages class Meta: table_name = 'Clients'
gpl-3.0
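BaseModel.apply_request above is easier to reason about stripped of peewee. This sketch runs the same skip/require/assign logic over plain dicts; all names and data are invented, and it treats empty strings as missing where the original only checks for None.

def apply_request(target, source, fields, ignore=(), required=(), dates=()):
    for field in fields:
        if field == 'id' or field in ignore:
            continue
        data = source.get(field)
        if field in required and data in (None, ''):
            return {'error': 'Empty required field'}
        if data in (None, ''):
            continue  # date parsing would slot in here for fields in `dates`
        target[field] = data
    return ''

user = {}
error = apply_request(user, {'username': 'ada', 'email': 'a@b.c'},
                      ['id', 'username', 'email', 'password'],
                      required=['username'])
print(error, user)  # -> '' {'username': 'ada', 'email': 'a@b.c'}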
brianmoose/civet
client/tests/test_JobRunner.py
2
13950
# Copyright 2016 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import unicode_literals, absolute_import from django.test import SimpleTestCase from django.test import override_settings from ci.tests import utils as test_utils from client import JobRunner, BaseClient from client.tests import utils import os, platform from distutils import spawn from mock import patch import subprocess BaseClient.setup_logger() try: from queue import Queue, Empty except ImportError: from Queue import Queue, Empty @override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()]) class Tests(SimpleTestCase): def setUp(self): self.build_root = "/foo/bar" os.environ["BUILD_ROOT"] = self.build_root self.message_q = Queue() self.command_q = Queue() def create_runner(self): client_info = utils.default_client_info() job_info = utils.create_job_dict() runner = JobRunner.JobRunner(client_info, job_info, self.message_q, self.command_q) self.assertEqual(runner.canceled, False) self.assertEqual(runner.stopped, False) self.assertEqual(runner.global_env["var_with_root"], "%s/bar" % self.build_root) self.assertEqual(runner.job_data["steps"][0]["environment"]["step_var_with_root"], "%s/foo" % self.build_root) return runner def check_job_results(self, results, runner, complete=True, canceled=False, failed=False): self.assertEqual(results['complete'], complete) self.assertEqual(results['canceled'], canceled) self.assertEqual(results['failed'], failed) self.assertIn('seconds', results) self.assertEqual(results['client_name'], runner.client_info["client_name"]) self.assertEqual(self.message_q.qsize(), 1) msg = self.message_q.get(block=False) self.assertEqual(len(msg), 4) server = runner.client_info["server"] self.assertEqual(msg["server"], server) self.assertTrue(msg["url"].startswith(server)) self.assertEqual(msg["job_id"], runner.job_data["job_id"]) self.assertEqual(msg["payload"], results) @patch.object(JobRunner.JobRunner, 'run_step') def test_run_job(self, mock_run_step): r = self.create_runner() run_step_results = {'canceled': False, 'exit_status': 0} mock_run_step.return_value = run_step_results # normal run results = r.run_job() self.check_job_results(results, r) # test bad exit_status run_step_results['exit_status'] = 1 mock_run_step.return_value = run_step_results self.assertEqual(r.job_data["steps"][0]["abort_on_failure"], True) results = r.run_job() self.check_job_results(results, r, failed=True) # bad exit_status but don't abort r.job_data["steps"][0]["abort_on_failure"] = False results = r.run_job() self.check_job_results(results, r) # test canceled r.canceled = True results = r.run_job() self.check_job_results(results, r, canceled=True) # test stopped r.canceled = False r.stopped = True results = r.run_job() self.check_job_results(results, r) # test error r.canceled = False r.stopped = False r.error = True results = r.run_job() self.check_job_results(results, r, canceled=True, failed=True) def test_update_step(self): r = self.create_runner() step = {'step_num': 1, 
'stepresult_id': 1} chunk_data = {"message": "message"} for stage in ["start", "complete", "update"]: chunk_data["message"] = stage r.update_step(stage, step, chunk_data) self.assertEqual(self.message_q.qsize(), 1) msg = self.message_q.get(block=False) self.assertEqual(len(msg), 4) server = r.client_info["server"] self.assertEqual(msg["server"], server) self.assertTrue(msg["url"].startswith(server)) self.assertIn(stage, msg["url"]) self.assertEqual(msg["job_id"], r.job_data["job_id"]) self.assertEqual(msg["payload"], chunk_data) def test_get_output_from_queue(self): r = self.create_runner() q0 = {"msg": 1} q1 = {"msg": 2} self.message_q.put(q0) self.message_q.put(q1) output = r.get_output_from_queue(self.message_q) self.assertEqual(len(output), 2) self.assertEqual(output[0], q0) self.assertEqual(output[1], q1) def test_read_command(self): r = self.create_runner() # test a command to another job cmd = {"job_id": r.job_data["job_id"], "command": "cancel"} # Test cancel command cmd["job_id"] = r.job_data["job_id"] self.assertEqual(r.canceled, False) self.command_q.put(cmd) r.read_command() self.assertEqual(r.canceled, True) # Test stop command cmd["command"] = "stop" self.command_q.put(cmd) r.canceled = False self.assertEqual(r.stopped, False) r.read_command() self.assertEqual(r.stopped, True) # Test unknown command cmd["command"] = "unknown" self.command_q.put(cmd) r.stopped = False r.read_command() self.assertEqual(r.stopped, False) self.assertEqual(r.canceled, False) # Test bad command message self.command_q.put({}) r.read_command() def test_read_process_output(self): r = self.create_runner() r.client_info["update_step_time"] = 1 with JobRunner.temp_file() as script_file: script = b"for i in $(seq 5);do echo start $i; sleep 1; echo done $i; done" script_file.write(script) script_file.close() with open(os.devnull, "wb") as devnull: proc = r.create_process(script_file.name, {}, devnull) # standard run of the subprocess, just check we get all the output out = r.read_process_output(proc, r.job_data["steps"][0], {}) proc.wait() test_out = "" self.assertGreater(self.message_q.qsize(), 3) msg_list = [] try: while True: l = self.message_q.get(block=False) msg_list.append(l) except Empty: pass for i in range(1, 6): start = "start {}\n".format(i) done = "done {}\n".format(i) if i < 4: # only do this test for the first few # since there is no guarentee that update_step() # will get called for all of them before the # process terminates found_start = False found_done = False for msg in msg_list: if start.strip() in msg["payload"]["output"]: found_start = True if done.strip() in msg["payload"]["output"]: found_done = True self.assertTrue(found_start) self.assertTrue(found_done) test_out += start + done self.assertEqual(test_out, out["output"]) self.assertEqual(out["complete"], True) self.assertGreater(out["time"], 1) proc = r.create_process(script_file.name, {}, devnull) # Test cancel while reading output self.command_q.put({"job_id": r.job_data["job_id"], "command": "cancel"}) self.assertEqual(r.canceled, False) r.read_process_output(proc, r.job_data["steps"][0], {}) proc.wait() self.assertEqual(r.canceled, True) def test_kill_job(self): with JobRunner.temp_file() as script: script.write(b"sleep 30") script.close() with open(os.devnull, "wb") as devnull: r = self.create_runner() proc = r.create_process(script.name, {}, devnull) r.kill_job(proc) self.assertEqual(proc.poll(), -15) # SIGTERM proc.wait() # get some coverage when the proc is already dead r.kill_job(proc) # the kill path for windows is 
different, just get some # coverage because we don't currently have a windows box # to test on with patch.object(platform, 'system') as mock_system: mock_system.side_effect = ["linux", "Windows"] proc = r.create_process(script.name, {}, devnull) r.kill_job(proc) with patch.object(spawn, 'find_executable') as mock_find: mock_system.side_effect = ["Windows"] mock_find.return_value = True r.kill_job(proc) # mimic not being able to kill the job with patch.object(subprocess.Popen, 'poll') as mock_poll, patch.object(subprocess.Popen, 'kill') as mock_kill: mock_poll.side_effect = [True, None, None] mock_kill.return_value = False proc = r.create_process(script.name, {}, devnull) r.kill_job(proc) def test_run_step(self): r = self.create_runner() r.client_info["update_step_time"] = 1 step_env_orig = r.job_data["steps"][0]["environment"].copy() global_env_orig = r.global_env.copy() results = r.run_step(r.job_data["steps"][0]) self.assertIn('test_output1', results['output']) self.assertIn('test_output2', results['output']) self.assertEqual(results['exit_status'], 0) self.assertEqual(results['canceled'], False) self.assertGreater(results['time'], 1) # Make sure run_step doesn't touch the environment self.assertEqual(r.global_env, global_env_orig) self.assertEqual(r.job_data["steps"][0]["environment"], step_env_orig) # Test output size limits work r.max_output_size = 10 results = r.run_step(r.job_data["steps"][0]) self.assertIn('command not found', results['output']) self.assertIn('Output size exceeded limit', results['output']) self.command_q.put({"job_id": r.job_data["job_id"], "command": "cancel"}) results = r.run_step(r.job_data["steps"][0]) self.assertEqual(results['canceled'], True) self.assertEqual(r.canceled, True) # just get some coverage with patch.object(JobRunner.JobRunner, "read_process_output") as mock_proc: r.canceled = False mock_proc.side_effect = Exception("Oh no!") results = r.run_step(r.job_data["steps"][0]) self.assertEqual(results['canceled'], True) self.assertEqual(r.canceled, True) # Simulate out of disk space error with patch.object(JobRunner.JobRunner, "run_step_process") as mock_run: r.canceled = False mock_run.side_effect = IOError("Oh no!") results = r.run_step(r.job_data["steps"][0]) self.assertEqual(results['exit_status'], 1) self.assertEqual(r.canceled, False) self.assertEqual(r.error, True) @patch.object(platform, 'system') def test_run_step_platform(self, mock_system): r = self.create_runner() r.client_info["update_step_time"] = 1 # Don't have a Windows box to test on but # we can get some basic coverage mock_system.return_value = "Windows" # the windows command won't work data = r.run_step(r.job_data["steps"][0]) self.assertEqual(data["time"], 0) self.assertEqual(r.stopped, True) def test_env_dict(self): r = self.create_runner() env = {"name": "value", "other": "value"} new_env = r.env_to_dict(env) self.assertEqual(env, new_env) r.env_to_dict([("name", "value"), ("other", "value")]) self.assertEqual(env, new_env) new_env = r.env_to_dict(("name", "value")) self.assertEqual({}, new_env) env["another"] = "BUILD_ROOT/foo" test_env = env.copy() r.clean_env(test_env) self.assertEqual(test_env["another"], "%s/foo" % self.build_root) test_env = env.copy() del os.environ["BUILD_ROOT"] r.clean_env(test_env) self.assertEqual(test_env["another"], "%s/foo" % os.getcwd()) def test_max_step_time(self): with JobRunner.temp_file() as script: script.write(b"sleep 30") script.close() with open(os.devnull, "wb") as devnull: r = self.create_runner() r.max_step_time = 2 proc = 
r.create_process(script.name, {}, devnull) out = r.read_process_output(proc, r.job_data["steps"][0], {}) self.assertIn("taking longer than the max", out["output"]) self.assertLess(out["time"], 10) self.assertEqual(out["canceled"], True)
apache-2.0
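The test_kill_job and platform-mocking tests above exercise a SIGTERM-then-Windows-fallback kill path without a real Windows host. A minimal sketch of that pattern, assuming a hypothetical kill_process helper and a taskkill fallback (neither is the client's actual implementation):

import platform
import subprocess
from distutils import spawn

def kill_process(proc):
    # Hypothetical helper mirroring what the mocked tests above exercise.
    if proc.poll() is not None:
        return  # process already exited; nothing to kill
    if platform.system() == "Windows" and spawn.find_executable("taskkill"):
        # /T kills the whole process tree, /F forces termination
        subprocess.call(["taskkill", "/F", "/T", "/PID", str(proc.pid)])
    else:
        proc.terminate()  # sends SIGTERM, matching the poll() == -15 assertion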
claws/AutobahnPython
autobahn/autobahn/websocket/http.py
35
19607
############################################################################### ## ## Copyright (C) 2011-2013 Tavendo GmbH ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## ############################################################################### ## ## HTTP Status Codes ## ## Source: http://en.wikipedia.org/wiki/List_of_HTTP_status_codes ## Adapted on 2011/10/11 ## ## ## 1xx Informational ## ## Request received, continuing process. ## ## This class of status code indicates a provisional response, consisting only of ## the Status-Line and optional headers, and is terminated by an empty line. ## Since HTTP/1.0 did not define any 1xx status codes, servers must not send ## a 1xx response to an HTTP/1.0 client except under experimental conditions. ## CONTINUE = (100, "Continue", "This means that the server has received the request headers, and that the client should proceed to send the request body (in the case of a request for which a body needs to be sent; for example, a POST request). If the request body is large, sending it to a server when a request has already been rejected based upon inappropriate headers is inefficient. To have a server check if the request could be accepted based on the request's headers alone, a client must send Expect: 100-continue as a header in its initial request[2] and check if a 100 Continue status code is received in response before continuing (or receive 417 Expectation Failed and not continue).") SWITCHING_PROTOCOLS = (101, "Switching Protocols", "This means the requester has asked the server to switch protocols and the server is acknowledging that it will do so.") PROCESSING = (102, "Processing (WebDAV) (RFC 2518)", "As a WebDAV request may contain many sub-requests involving file operations, it may take a long time to complete the request. This code indicates that the server has received and is processing the request, but no response is available yet.[3] This prevents the client from timing out and assuming the request was lost.") CHECKPOINT = (103, "Checkpoint", "This code is used in the Resumable HTTP Requests Proposal to resume aborted PUT or POST requests.") REQUEST_URI_TOO_LONG = (122, "Request-URI too long", "This is a non-standard IE7-only code which means the URI is longer than a maximum of 2083 characters.[5][6] (See code 414.)") ## ## 2xx Success ## ## This class of status codes indicates the action requested by the client was ## received, understood, accepted and processed successfully. ## OK = (200, "OK", "Standard response for successful HTTP requests. The actual response will depend on the request method used. In a GET request, the response will contain an entity corresponding to the requested resource. In a POST request the response will contain an entity describing or containing the result of the action.") CREATED = (201, "Created", "The request has been fulfilled and resulted in a new resource being created.") ACCEPTED = (202, "Accepted", "The request has been accepted for processing, but the processing has not been completed. 
The request might or might not eventually be acted upon, as it might be disallowed when processing actually takes place.") NON_AUTHORATIVE = (203, "Non-Authoritative Information (since HTTP/1.1)", "The server successfully processed the request, but is returning information that may be from another source.") NO_CONTENT = (204, "No Content", "The server successfully processed the request, but is not returning any content.") RESET_CONTENT = (205, "Reset Content", "The server successfully processed the request, but is not returning any content. Unlike a 204 response, this response requires that the requester reset the document view.") PARTIAL_CONTENT = (206, "Partial Content", "The server is delivering only part of the resource due to a range header sent by the client. The range header is used by tools like wget to enable resuming of interrupted downloads, or split a download into multiple simultaneous streams.") MULTI_STATUS = (207, "Multi-Status (WebDAV) (RFC 4918)", "The message body that follows is an XML message and can contain a number of separate response codes, depending on how many sub-requests were made.") IM_USED = (226, "IM Used (RFC 3229)", "The server has fulfilled a GET request for the resource, and the response is a representation of the result of one or more instance-manipulations applied to the current instance.") ## ## 3xx Redirection ## ## The client must take additional action to complete the request. ## ## This class of status code indicates that further action needs to be taken ## by the user agent in order to fulfil the request. The action required may ## be carried out by the user agent without interaction with the user if and ## only if the method used in the second request is GET or HEAD. A user agent ## should not automatically redirect a request more than five times, since such ## redirections usually indicate an infinite loop. ## MULTIPLE_CHOICES = (300, "Multiple Choices", "Indicates multiple options for the resource that the client may follow. It, for instance, could be used to present different format options for video, list files with different extensions, or word sense disambiguation.") MOVED_PERMANENTLY = (301, "Moved Permanently", "This and all future requests should be directed to the given URI.") FOUND = (302, "Found", "This is an example of industrial practice contradicting the standard. HTTP/1.0 specification (RFC 1945) required the client to perform a temporary redirect (the original describing phrase was 'Moved Temporarily', but popular browsers implemented 302 with the functionality of a 303 See Other. Therefore, HTTP/1.1 added status codes 303 and 307 to distinguish between the two behaviours. However, some Web applications and frameworks use the 302 status code as if it were the 303.") SEE_OTHER = (303, "See Other (since HTTP/1.1)", "The response to the request can be found under another URI using a GET method. When received in response to a POST (or PUT/DELETE), it should be assumed that the server has received the data and the redirect should be issued with a separate GET message.") NOT_MODIFIED = (304, "Not Modified", "Indicates the resource has not been modified since last requested.[2] Typically, the HTTP client provides a header like the If-Modified-Since header to provide a time against which to compare. 
Using this saves bandwidth and reprocessing on both the server and client, as only the header data must be sent and received in comparison to the entirety of the page being re-processed by the server, then sent again using more bandwidth of the server and client.") USE_PROXY = (305, "Use Proxy (since HTTP/1.1)", "Many HTTP clients (such as Mozilla[11] and Internet Explorer) do not correctly handle responses with this status code, primarily for security reasons.") SWITCH_PROXY = (306, "Switch Proxy", "No longer used. Originally meant 'Subsequent requests should use the specified proxy'.") TEMPORARY_REDIRECT = (307, "Temporary Redirect (since HTTP/1.1)", "In this occasion, the request should be repeated with another URI, but future requests can still use the original URI.[2] In contrast to 303, the request method should not be changed when reissuing the original request. For instance, a POST request must be repeated using another POST request.") RESUME_INCOMPLETE = (308, "Resume Incomplete", "This code is used in the Resumable HTTP Requests Proposal to resume aborted PUT or POST requests.") ## ## 4xx Client Error ## ## The 4xx class of status code is intended for cases in which the client ## seems to have erred. Except when responding to a HEAD request, the server ## should include an entity containing an explanation of the error situation, ## and whether it is a temporary or permanent condition. These status codes are ## applicable to any request method. User agents should display any included ## entity to the user. These are typically the most common error codes ## encountered while online. ## BAD_REQUEST = (400, "Bad Request", "The request cannot be fulfilled due to bad syntax.") UNAUTHORIZED = (401, "Unauthorized", "Similar to 403 Forbidden, but specifically for use when authentication is possible but has failed or not yet been provided.[2] The response must include a WWW-Authenticate header field containing a challenge applicable to the requested resource. See Basic access authentication and Digest access authentication.") PAYMENT_REQUIRED = (402, "Payment Required", "Reserved for future use.[2] The original intention was that this code might be used as part of some form of digital cash or micropayment scheme, but that has not happened, and this code is not usually used. As an example of its use, however, Apple's MobileMe service generates a 402 error if the MobileMe account is delinquent.") FORBIDDEN = (403, "Forbidden", "The request was a legal request, but the server is refusing to respond to it.[2] Unlike a 401 Unauthorized response, authenticating will make no difference.[2]") NOT_FOUND = (404, "Not Found", "The requested resource could not be found but may be available again in the future.[2] Subsequent requests by the client are permissible.") METHOD_NOT_ALLOWED = (405, "Method Not Allowed", "A request was made of a resource using a request method not supported by that resource;[2] for example, using GET on a form which requires data to be presented via POST, or using PUT on a read-only resource.") NOT_ACCEPTABLE = (406, "Not Acceptable", "The requested resource is only capable of generating content not acceptable according to the Accept headers sent in the request.") PROXY_AUTH_REQUIRED = (407, "Proxy Authentication Required", "The client must first authenticate itself with the proxy.") REQUEST_TIMEOUT = (408, "Request Timeout", "The server timed out waiting for the request. 
According to W3 HTTP specifications: 'The client did not produce a request within the time that the server was prepared to wait. The client MAY repeat the request without modifications at any later time.'") CONFLICT = (409, "Conflict", "Indicates that the request could not be processed because of conflict in the request, such as an edit conflict.") GONE = (410, "Gone", "Indicates that the resource requested is no longer available and will not be available again.[2] This should be used when a resource has been intentionally removed and the resource should be purged. Upon receiving a 410 status code, the client should not request the resource again in the future. Clients such as search engines should remove the resource from their indices. Most use cases do not require clients and search engines to purge the resource, and a '404 Not Found' may be used instead.") LENGTH_REQUIRED = (411, "Length Required", "The request did not specify the length of its content, which is required by the requested resource.") PRECONDITION_FAILED = (412, "Precondition Failed", "The server does not meet one of the preconditions that the requester put on the request.") REQUEST_ENTITY_TOO_LARGE = (413, "Request Entity Too Large", "The request is larger than the server is willing or able to process.") REQUEST_URI_TOO_LARGE = (414, "Request-URI Too Long", "The URI provided was too long for the server to process.") UNSUPPORTED_MEDIA_TYPE = (415, "Unsupported Media Type", "The request entity has a media type which the server or resource does not support. For example, the client uploads an image as image/svg+xml, but the server requires that images use a different format.") INVALID_REQUEST_RANGE = (416, "Requested Range Not Satisfiable", "The client has asked for a portion of the file, but the server cannot supply that portion.[2] For example, if the client asked for a part of the file that lies beyond the end of the file.") EXPECTATION_FAILED = (417, "Expectation Failed", "The server cannot meet the requirements of the Expect request-header field.") TEAPOT = (418, "I'm a teapot (RFC 2324)", "This code was defined in 1998 as one of the traditional IETF April Fools' jokes, in RFC 2324, Hyper Text Coffee Pot Control Protocol, and is not expected to be implemented by actual HTTP servers.") UNPROCESSABLE_ENTITY = (422, "Unprocessable Entity (WebDAV) (RFC 4918)", "The request was well-formed but was unable to be followed due to semantic errors.") LOCKED = (423, "Locked (WebDAV) (RFC 4918)", "The resource that is being accessed is locked.") FAILED_DEPENDENCY = (424, "Failed Dependency (WebDAV) (RFC 4918)", "The request failed due to failure of a previous request (e.g. a PROPPATCH).") UNORDERED_COLLECTION = (425, "Unordered Collection (RFC 3648)", "Defined in drafts of 'WebDAV Advanced Collections Protocol', but not present in 'Web Distributed Authoring and Versioning (WebDAV) Ordered Collections Protocol'.") UPGRADE_REQUIRED = (426, "Upgrade Required (RFC 2817)", "The client should switch to a different protocol such as TLS/1.0.") NO_RESPONSE = (444, "No Response", "A Nginx HTTP server extension. The server returns no information to the client and closes the connection (useful as a deterrent for malware).") RETRY_WITH = (449, "Retry With", "A Microsoft extension. The request should be retried after performing the appropriate action.") PARANTAL_BLOCKED = (450, "Blocked by Windows Parental Controls", "A Microsoft extension. 
This error is given when Windows Parental Controls are turned on and are blocking access to the given webpage.") CLIENT_CLOSED_REQUEST = (499, "Client Closed Request", "An Nginx HTTP server extension. This code is introduced to log the case when the connection is closed by client while HTTP server is processing its request, making server unable to send the HTTP header back.") ## ## 5xx Server Error ## ## The server failed to fulfill an apparently valid request. ## ## Response status codes beginning with the digit "5" indicate cases in which ## the server is aware that it has encountered an error or is otherwise incapable ## of performing the request. Except when responding to a HEAD request, the server ## should include an entity containing an explanation of the error situation, and ## indicate whether it is a temporary or permanent condition. Likewise, user agents ## should display any included entity to the user. These response codes are ## applicable to any request method. ## INTERNAL_SERVER_ERROR = (500, "Internal Server Error", "A generic error message, given when no more specific message is suitable.") NOT_IMPLEMENTED = (501, "Not Implemented", "The server either does not recognise the request method, or it lacks the ability to fulfill the request.") BAD_GATEWAY = (502, "Bad Gateway", "The server was acting as a gateway or proxy and received an invalid response from the upstream server.") SERVICE_UNAVAILABLE = (503, "Service Unavailable", "The server is currently unavailable (because it is overloaded or down for maintenance). Generally, this is a temporary state.") GATEWAY_TIMEOUT = (504, "Gateway Timeout", "The server was acting as a gateway or proxy and did not receive a timely response from the upstream server.") UNSUPPORTED_HTTP_VERSION = (505, "HTTP Version Not Supported", "The server does not support the HTTP protocol version used in the request.") VARIANT_ALSO_NEGOTIATES = (506, "Variant Also Negotiates (RFC 2295)", "Transparent content negotiation for the request results in a circular reference.") INSUFFICIENT_STORAGE = (507, "Insufficient Storage (WebDAV)(RFC 4918)", "The server is unable to store the representation needed to complete the request.") BANDWIDTH_LIMIT_EXCEEDED = (509, "Bandwidth Limit Exceeded (Apache bw/limited extension)", "This status code, while used by many servers, is not specified in any RFCs.") NOT_EXTENDED = (510, "Not Extended (RFC 2774)", "Further extensions to the request are required for the server to fulfill it.") NETWORK_READ_TIMEOUT = (598, "Network read timeout error (Informal convention)", "This status code is not specified in any RFCs, but is used by some HTTP proxies to signal a network read timeout behind the proxy to a client in front of the proxy.") NETWORK_CONNECT_TIMEOUT = (599, "Network connect timeout error (Informal convention)", "This status code is not specified in any RFCs, but is used by some HTTP proxies to signal a network connect timeout behind the proxy to a client in front of the proxy.") class HttpException(Exception): """ Throw an instance of this class to deny a WebSocket connection during handshake in :meth:`autobahn.websocket.protocol.WebSocketServerProtocol.onConnect`. """ def __init__(self, code, reason): """ Constructor. :param code: HTTP error code. :type code: int :param reason: HTTP error reason. :type reason: str """ self.code = code self.reason = reason
apache-2.0
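The HttpException docstring above says it is raised from onConnect to refuse a WebSocket handshake. A minimal sketch of that usage; the protocol import path and the subprotocol check are assumptions, not taken from this file:

from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerProtocol  # assumed flavour

class MyServerProtocol(WebSocketServerProtocol):
    def onConnect(self, request):
        # Deny the handshake with a plain HTTP error. The (code, title,
        # description) tuples defined above supply the numeric code.
        if "my.example.protocol" not in request.protocols:  # hypothetical check
            raise http.HttpException(http.BAD_REQUEST[0],
                                     "subprotocol not supported")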
nthall/pip
pip/compat/dictconfig.py
921
23096
# This is a copy of the Python logging.config.dictconfig module, # reproduced with permission. It is provided here for backwards # compatibility for Python versions prior to 2.7. # # Copyright 2009-2010 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import absolute_import import logging.handlers import re import sys import types from pip._vendor import six # flake8: noqa IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True # # This function is defined in logging only in recent versions of Python # try: from logging import _checkLevel except ImportError: def _checkLevel(level): if isinstance(level, int): rv = level elif str(level) == level: if level not in logging._levelNames: raise ValueError('Unknown level: %r' % level) rv = logging._levelNames[level] else: raise TypeError('Level not an integer or a ' 'valid string: %r' % level) return rv # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. The # conversion converts base dicts, lists and tuples to their wrapped # equivalents, whereas strings which match a conversion format are converted # appropriately. # # Each wrapper should have a configurator attribute holding the actual # configurator to use for conversion. 
class ConvertingDict(dict): """A converting dictionary wrapper.""" def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) # If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) # If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, key, default=None): value = dict.pop(self, key, default) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class ConvertingList(list): """A converting list wrapper.""" def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) # If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, idx=-1): value = list.pop(self, idx) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self return result class ConvertingTuple(tuple): """A converting tuple wrapper.""" def __getitem__(self, key): value = tuple.__getitem__(self, key) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class BaseConfigurator(object): """ The configurator base class which defines some useful defaults. """ CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { 'ext' : 'ext_convert', 'cfg' : 'cfg_convert', } # We might want to use a different one, e.g. importlib importer = __import__ def __init__(self, config): self.config = ConvertingDict(config) self.config.configurator = self def resolve(self, s): """ Resolve strings to objects using standard import and attribute syntax. """ name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' 
+ frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v def ext_convert(self, value): """Default converter for the ext:// protocol.""" return self.resolve(value) def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] # print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) # rest should be empty return d def convert(self, value): """ Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. """ if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, six.string_types): # str for py3k m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value def configure_custom(self, config): """Configure an object with a user-supplied factory.""" c = config.pop('()') if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType: c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers kwargs = dict((k, config[k]) for k in config if valid_ident(k)) result = c(**kwargs) if props: for name, value in props.items(): setattr(result, name, value) return result def as_tuple(self, value): """Utility function which converts lists to tuples.""" if isinstance(value, list): value = tuple(value) return value class DictConfigurator(BaseConfigurator): """ Configure logging using a dictionary-like object to describe the configuration. 
""" def configure(self): """Do the configuration.""" config = self.config if 'version' not in config: raise ValueError("dictionary doesn't specify a version") if config['version'] != 1: raise ValueError("Unsupported version: %s" % config['version']) incremental = config.pop('incremental', False) EMPTY_DICT = {} logging._acquireLock() try: if incremental: handlers = config.get('handlers', EMPTY_DICT) # incremental handler config only if handler name # ties in to logging._handlers (Python 2.7) if sys.version_info[:2] == (2, 7): for name in handlers: if name not in logging._handlers: raise ValueError('No handler found with ' 'name %r' % name) else: try: handler = logging._handlers[name] handler_config = handlers[name] level = handler_config.get('level', None) if level: handler.setLevel(_checkLevel(level)) except StandardError as e: raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) loggers = config.get('loggers', EMPTY_DICT) for name in loggers: try: self.configure_logger(name, loggers[name], True) except StandardError as e: raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) root = config.get('root', None) if root: try: self.configure_root(root, True) except StandardError as e: raise ValueError('Unable to configure root ' 'logger: %s' % e) else: disable_existing = config.pop('disable_existing_loggers', True) logging._handlers.clear() del logging._handlerList[:] # Do formatters first - they don't refer to anything else formatters = config.get('formatters', EMPTY_DICT) for name in formatters: try: formatters[name] = self.configure_formatter( formatters[name]) except StandardError as e: raise ValueError('Unable to configure ' 'formatter %r: %s' % (name, e)) # Next, do filters - they don't refer to anything else, either filters = config.get('filters', EMPTY_DICT) for name in filters: try: filters[name] = self.configure_filter(filters[name]) except StandardError as e: raise ValueError('Unable to configure ' 'filter %r: %s' % (name, e)) # Next, do handlers - they refer to formatters and filters # As handlers can refer to other handlers, sort the keys # to allow a deterministic order of configuration handlers = config.get('handlers', EMPTY_DICT) for name in sorted(handlers): try: handler = self.configure_handler(handlers[name]) handler.name = name handlers[name] = handler except StandardError as e: raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) # Next, do loggers - they refer to handlers and filters # we don't want to lose the existing loggers, # since other threads may have pointers to them. # existing is set to contain all existing loggers, # and as we go through the new configuration we # remove any which are configured. At the end, # what's left in existing is the set of loggers # which were in the previous configuration but # which are not in the new configuration. root = logging.root existing = list(root.manager.loggerDict) # The list needs to be sorted so that we can # avoid disabling child loggers of explicitly # named loggers. With a sorted list it is easier # to find the child loggers. existing.sort() # We'll keep the list of existing loggers # which are children of named loggers here... child_loggers = [] # now set up the new ones... loggers = config.get('loggers', EMPTY_DICT) for name in loggers: if name in existing: i = existing.index(name) prefixed = name + "." 
pflen = len(prefixed) num_existing = len(existing) i = i + 1 # look at the entry after name while (i < num_existing) and\ (existing[i][:pflen] == prefixed): child_loggers.append(existing[i]) i = i + 1 existing.remove(name) try: self.configure_logger(name, loggers[name]) except StandardError as e: raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) # Disable any old loggers. There's no point deleting # them as other threads may continue to hold references # and by disabling them, you stop them doing any logging. # However, don't disable children of named loggers, as that's # probably not what was intended by the user. for log in existing: logger = root.manager.loggerDict[log] if log in child_loggers: logger.level = logging.NOTSET logger.handlers = [] logger.propagate = True elif disable_existing: logger.disabled = True # And finally, do the root logger root = config.get('root', None) if root: try: self.configure_root(root) except StandardError as e: raise ValueError('Unable to configure root ' 'logger: %s' % e) finally: logging._releaseLock() def configure_formatter(self, config): """Configure a formatter from a dictionary.""" if '()' in config: factory = config['()'] # for use in exception handler try: result = self.configure_custom(config) except TypeError as te: if "'format'" not in str(te): raise # Name of parameter changed from fmt to format. # Retry with old name. # This is so that code can be used with older Python versions #(e.g. by Django) config['fmt'] = config.pop('format') config['()'] = factory result = self.configure_custom(config) else: fmt = config.get('format', None) dfmt = config.get('datefmt', None) result = logging.Formatter(fmt, dfmt) return result def configure_filter(self, config): """Configure a filter from a dictionary.""" if '()' in config: result = self.configure_custom(config) else: name = config.get('name', '') result = logging.Filter(name) return result def add_filters(self, filterer, filters): """Add filters to a filterer from a list of names.""" for f in filters: try: filterer.addFilter(self.config['filters'][f]) except StandardError as e: raise ValueError('Unable to add filter %r: %s' % (f, e)) def configure_handler(self, config): """Configure a handler from a dictionary.""" formatter = config.pop('formatter', None) if formatter: try: formatter = self.config['formatters'][formatter] except StandardError as e: raise ValueError('Unable to set formatter ' '%r: %s' % (formatter, e)) level = config.pop('level', None) filters = config.pop('filters', None) if '()' in config: c = config.pop('()') if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType: c = self.resolve(c) factory = c else: klass = self.resolve(config.pop('class')) # Special case for handler which refers to another handler if issubclass(klass, logging.handlers.MemoryHandler) and\ 'target' in config: try: config['target'] = self.config['handlers'][config['target']] except StandardError as e: raise ValueError('Unable to set target handler ' '%r: %s' % (config['target'], e)) elif issubclass(klass, logging.handlers.SMTPHandler) and\ 'mailhost' in config: config['mailhost'] = self.as_tuple(config['mailhost']) elif issubclass(klass, logging.handlers.SysLogHandler) and\ 'address' in config: config['address'] = self.as_tuple(config['address']) factory = klass kwargs = dict((k, config[k]) for k in config if valid_ident(k)) try: result = factory(**kwargs) except TypeError as te: if "'stream'" not in str(te): raise # The argument name changed from strm to 
stream # Retry with old name. # This is so that code can be used with older Python versions #(e.g. by Django) kwargs['strm'] = kwargs.pop('stream') result = factory(**kwargs) if formatter: result.setFormatter(formatter) if level is not None: result.setLevel(_checkLevel(level)) if filters: self.add_filters(result, filters) return result def add_handlers(self, logger, handlers): """Add handlers to a logger from a list of names.""" for h in handlers: try: logger.addHandler(self.config['handlers'][h]) except StandardError as e: raise ValueError('Unable to add handler %r: %s' % (h, e)) def common_logger_config(self, logger, config, incremental=False): """ Perform configuration which is common to root and non-root loggers. """ level = config.get('level', None) if level is not None: logger.setLevel(_checkLevel(level)) if not incremental: # Remove any existing handlers for h in logger.handlers[:]: logger.removeHandler(h) handlers = config.get('handlers', None) if handlers: self.add_handlers(logger, handlers) filters = config.get('filters', None) if filters: self.add_filters(logger, filters) def configure_logger(self, name, config, incremental=False): """Configure a non-root logger from a dictionary.""" logger = logging.getLogger(name) self.common_logger_config(logger, config, incremental) propagate = config.get('propagate', None) if propagate is not None: logger.propagate = propagate def configure_root(self, config, incremental=False): """Configure a root logger from a dictionary.""" root = logging.getLogger() self.common_logger_config(root, config, incremental) dictConfigClass = DictConfigurator def dictConfig(config): """Configure logging using a dictionary.""" dictConfigClass(config).configure()
mit
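The module above backports logging.config.dictConfig for Pythons older than 2.7. A minimal configuration it accepts, following the standard dictConfig schema (the formatter and handler names here are illustrative):

from pip.compat.dictconfig import dictConfig  # path as dumped above

dictConfig({
    "version": 1,  # configure() rejects anything else
    "disable_existing_loggers": False,
    "formatters": {
        "plain": {"format": "%(levelname)s %(name)s: %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",  # resolved via BaseConfigurator.resolve
            "formatter": "plain",
            "level": "DEBUG",
        },
    },
    "root": {"handlers": ["console"], "level": "INFO"},
})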
pombredanne/libcomps
libcomps/src/python/docs/doc-sources/conf.py
2
8487
# -*- coding: utf-8 -*-
#
# x documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 9 16:34:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import ctypes
clibcomps = ctypes.cdll.LoadLibrary("/home/jluza/libcomps/libcomps-build/src/libcomps.so.0.1.6")
os.environ['LD_LIBRARY_PATH'] = "%s" % "/home/jluza/libcomps/libcomps-build/src"
print os.environ['LD_LIBRARY_PATH']
sys.path.insert(0, os.path.abspath("/home/jluza/libcomps/libcomps-build/src/python/src/python2"))

import libcomps

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
              'sphinx.ext.coverage', 'sphinx.ext.pngmath']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'libcomps'
copyright = u'RedHat 2013'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.6'
# The full version, including alpha/beta/rc tags.
release = ("0." "1." "6-" "9")

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

autodoc_member_order = "groupwise"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'xdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'x.tex', u'x Documentation',
     u'x', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'x', u'x Documentation',
     [u'x'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'x', u'x Documentation',
     u'x', 'x', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


def skip(app, what, name, obj, skip, options):
    if what == "module" and type(obj).__name__ == "builtin_function_or_method":
        return False
    if name == "__init__":
        return type(obj).__name__ == "wrapper_descriptor"
    return skip


def setup(app):
    app.connect("autodoc-skip-member", skip)

# Example configuration for intersphinx: refer to the Python standard library.
gpl-2.0
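Nothing in the record above shows the build invocation for this conf.py. Assuming the conventional Sphinx layout implied by the dumped path, the HTML docs would be built with something like sphinx-build -b html doc-sources doc-build, run from libcomps/src/python/docs (builder and output directory are guesses).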
Health123/ansible
lib/ansible/utils/module_docs_fragments/cloudstack.py
85
2161
# -*- coding: utf-8 -*-
# Copyright (c) 2015 René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):

    # Standard cloudstack documentation fragment
    DOCUMENTATION = '''
options:
  api_key:
    description:
      - API key of the CloudStack API.
    required: false
    default: null
  api_secret:
    description:
      - Secret key of the CloudStack API.
    required: false
    default: null
  api_url:
    description:
      - URL of the CloudStack API e.g. https://cloud.example.com/client/api.
    required: false
    default: null
  api_http_method:
    description:
      - HTTP method used.
    required: false
    default: 'get'
    choices: [ 'get', 'post' ]
  api_timeout:
    description:
      - HTTP timeout.
    required: false
    default: 10
requirements:
  - "python >= 2.6"
  - cs
notes:
  - Ansible uses the C(cs) library's configuration method if credentials are not
    provided by the options C(api_url), C(api_key), C(api_secret).
    Configuration is read from several locations, in the following order.
  - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET),
    C(CLOUDSTACK_METHOD) and C(CLOUDSTACK_TIMEOUT) environment variables.
  - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file.
  - A C(cloudstack.ini) file in the current working directory.
  - A C(.cloudstack.ini) file in the user's home directory.
    See https://github.com/exoscale/cs for more information.
  - This module supports check mode.
'''
gpl-3.0
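The notes above describe the cs library's configuration fallbacks. A minimal cloudstack.ini of the kind they refer to might look like the following; the section name follows the exoscale/cs documentation and the values are placeholders:

[cloudstack]
endpoint = https://cloud.example.com/client/api
key = <your api key>
secret = <your secret key>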
go-smart/glossia-quickstart
code/problem.py
1
13906
"""This requires CGAL mesher applied to series of surfaces. See readme.txt for details. """ from __future__ import print_function # Use FEniCS for Finite Element import fenics as d # Useful to import the derivative separately from dolfin import dx # Useful numerical libraries import numpy as N import matplotlib matplotlib.use('SVG') import matplotlib.pyplot as P # General tools import os import subprocess import shutil # UFL import ufl # Set interactive plotting on P.ion() # Use a separate Python file to declare variables import variables as v import vtk_tools input_mesh = "input" class IREProblem: """class IREProblem() This represents a Finite Element IRE problem using a similar algorithm to that of ULJ """ def __init__(self): pass def load(self): # Convert mesh from MSH to Dolfin-XML shutil.copyfile("input/%s.msh" % input_mesh, "%s.msh" % input_mesh) destination_xml = "%s.xml" % input_mesh subprocess.call(["dolfin-convert", "%s.msh" % input_mesh, destination_xml]) # Load mesh and boundaries mesh = d.Mesh(destination_xml) self.patches = d.MeshFunction("size_t", mesh, "%s_facet_region.xml" % input_mesh) self.subdomains = d.MeshFunction("size_t", mesh, "%s_physical_region.xml" % input_mesh) # Define differential over subdomains self.dxs = d.dx[self.subdomains] # Turn subdomains into a Numpy array self.subdomains_array = N.asarray(self.subdomains.array(), dtype=N.int32) # Create a map from subdomain indices to tissues self.tissues_by_subdomain = {} for i, t in v.tissues.items(): print(i, t) for j in t["indices"]: self.tissues_by_subdomain[j] = t self.mesh = mesh self.setup_fe() self.prepare_increase_conductivity() def load_patient_data(self): indicators = {} for subdomain in ("liver", "vessels", "tumour"): values = N.empty((v.dim_height, v.dim_width, v.dim_depth), dtype='uintp') for i in range(0, v.dim_depth): slice = N.loadtxt(os.path.join( v.patient_data_location, "patient-%s.%d.txt" % (subdomain, i + 1)) ) values[:, :, i] = slice.astype('uintp') indicators[subdomain] = values self.indicators = indicators def interpolate_to_patient_data(self, function, indicator): values = N.empty((v.dim_height, v.dim_width, v.dim_depth), dtype='float') it = N.nditer(values, flags=['multi_index']) u = N.empty((1,)) x = N.empty((3,)) delta = (v.delta_height, v.delta_width, v.delta_depth) offset = (v.offset_x, v.offset_y, v.offset_z) while not it.finished: if indicator[it.multi_index] != 1: it.iternext() continue x[0] = it.multi_index[1] * delta[1] - offset[0] x[1] = it.multi_index[0] * delta[0] - offset[1] x[2] = it.multi_index[2] * delta[2] - offset[2] function.eval(u, x) values[...] 
= u[0] it.iternext() return values def setup_fe(self): # Define the relevant function spaces V = d.FunctionSpace(self.mesh, "Lagrange", 1) self.V = V # DG0 is useful for defining piecewise constant functions DV = d.FunctionSpace(self.mesh, "Discontinuous Lagrange", 0) self.DV = DV # Define test and trial functions for FE self.z = d.TrialFunction(self.V) self.w = d.TestFunction(self.V) def per_tissue_constant(self, generator): fefunction = d.Function(self.DV) generated_values = dict((l, generator(l)) for l in N.unique(self.subdomains_array)) vector = N.vectorize(generated_values.get) fefunction.vector()[:] = vector(self.subdomains_array) return fefunction def get_tumour_volume(self): # Perhaps there is a prettier way, but integrate a unit function over the tumour tets one = d.Function(self.V) one.vector()[:] = 1 return sum(d.assemble(one * self.dxs(i)) for i in v.tissues["tumour"]["indices"]) def save_lesion(self): final_filename = "results/%s-max_e%06d.vtu" % (input_mesh, self.max_e_count) shutil.copyfile(final_filename, "../lesion_volume.vtu") destination = "../lesion_surface.vtp" vtk_tools.save_lesion(destination, final_filename, "max_E", (80, None)) print("Output file to %s?" % destination, os.path.exists(destination)) def solve(self): # TODO: when FEniCS ported to Python3, this should be exist_ok try: os.makedirs('results') except OSError: pass z, w = (self.z, self.w) u0 = d.Constant(0.0) # Define the linear and bilinear forms L = u0 * w * dx # Define useful functions cond = d.Function(self.DV) U = d.Function(self.V) # Initialize the max_e vector, that will store the cumulative max e values max_e = d.Function(self.V) max_e.vector()[:] = 0.0 max_e.rename("max_E", "Maximum energy deposition by location") max_e_file = d.File("results/%s-max_e.pvd" % input_mesh) max_e_per_step = d.Function(self.V) max_e_per_step_file = d.File("results/%s-max_e_per_step.pvd" % input_mesh) self.es = {} self.max_es = {} fi = d.File("results/%s-cond.pvd" % input_mesh) potential_file = d.File("results/%s-potential.pvd" % input_mesh) # Loop through the voltages and electrode combinations for i, (anode, cathode, voltage) in enumerate(v.electrode_triples): print("Electrodes %d (%lf) -> %d (0)" % (anode, voltage, cathode)) cond = d.project(self.sigma_start, V=self.DV) # Define the Dirichlet boundary conditions on the active needles uV = d.Constant(voltage) term1_bc = d.DirichletBC(self.V, uV, self.patches, v.needles[anode]) term2_bc = d.DirichletBC(self.V, u0, self.patches, v.needles[cathode]) e = d.Function(self.V) e.vector()[:] = max_e.vector() # Re-evaluate conductivity self.increase_conductivity(cond, e) for j in range(v.max_restarts): # Update the bilinear form a = d.inner(d.nabla_grad(z), cond * d.nabla_grad(w)) * dx # Solve again print(" [solving...") d.solve(a == L, U, bcs=[term1_bc, term2_bc]) print(" ....solved]") # Extract electric field norm for k in range(len(U.vector())): if N.isnan(U.vector()[k]): U.vector()[k] = 1e5 e_new = d.project(d.sqrt(d.dot(d.grad(U), d.grad(U))), self.V) # Take the max of the new field and the established electric field e.vector()[:] = N.array([max(*X) for X in zip(e.vector(), e_new.vector())]) # Re-evaluate conductivity fi << cond self.increase_conductivity(cond, e) potential_file << U # Save the max e function to a VTU max_e_per_step.vector()[:] = e.vector()[:] max_e_per_step_file << max_e_per_step # Store this electric field norm, for this triple, for later reference self.es[i] = e # Store the max of this electric field norm and that for all previous triples max_e_array = 
N.array([max(*X) for X in zip(max_e.vector(), e.vector())]) max_e.vector()[:] = max_e_array # Create a new max_e function for storage, or it will be overwritten by the next iteration max_e_new = d.Function(self.V) max_e_new.vector()[:] = max_e_array # Store this max e function for the cumulative coverage curve calculation later self.max_es[i] = max_e_new # Save the max e function to a VTU max_e_file << max_e self.max_e_count = i def prepare_increase_conductivity(self): def sigma_function(l, i): s = self.tissues_by_subdomain[l]["sigma"] if isinstance(s, list): return s[i] else: return s def threshold_function(l, i): s = self.tissues_by_subdomain[l]["sigma"] if isinstance(s, list): return self.tissues_by_subdomain[l][i] else: return 1 if i == "threshold reversible" else 0 self.sigma_start = self.per_tissue_constant(lambda l: sigma_function(l, 0)) self.sigma_end = self.per_tissue_constant(lambda l: sigma_function(l, 1)) self.threshold_reversible = self.per_tissue_constant(lambda l: threshold_function(l, "threshold reversible")) self.threshold_irreversible = self.per_tissue_constant(lambda l: threshold_function(l, "threshold irreversible")) self.k = (self.sigma_end - self.sigma_start) / (self.threshold_irreversible - self.threshold_reversible) self.h = self.sigma_start - self.k * self.threshold_reversible def increase_conductivity(self, cond, e): # Set up the three way choice function intermediate = e * self.k + self.h not_less_than = ufl.conditional(ufl.gt(e, self.threshold_irreversible), self.sigma_end, intermediate) cond_expression = ufl.conditional(ufl.lt(e, self.threshold_reversible), self.sigma_start, not_less_than) # Project this onto the function space cond_function = d.project(ufl.Max(cond_expression, cond), cond.function_space()) cond.assign(cond_function) def plot_bitmap_result(self): # Create a horizontal axis cc_haxis = N.linspace(5000, 1e5, 200) # Import the binary data indicating the location of structures self.load_patient_data() # Calculate the tumour volume; this is what we will compare against tumour_volume = (self.indicators["tumour"] == 1).sum() # Initialize the output_arrays vector and rescale the x to V/cm output_arrays = [cc_haxis / 100] # Loop through the electrode triples for i, triple in enumerate(v.electrode_triples): # Project the max e values for this triple to DG0 - this forces an evaluation of the function at the mid-point of each tet, DG0's only DOF e_dg = self.interpolate_to_patient_data(self.max_es[i], self.indicators["tumour"]) # Sum the tet volumes for tets with a midpoint value greater than x, looping over x as e-norm thresholds (also scale to tumour volume) elim = N.vectorize(lambda x: (e_dg > x).sum() / tumour_volume) output_arrays.append(elim(cc_haxis)) # Compile into a convenient array output = N.array(zip(*output_arrays)) # Output cumulative coverage curves as CSV N.savetxt('results/%s-coverage_curves_bitmap.csv' % input_mesh, output) # Plot the coverage curves for (anode, cathode, voltage), a in zip(v.electrode_triples, output_arrays[1:]): P.plot(output_arrays[0], a, label="%d - %d" % (anode, cathode)) # Draw the plot P.draw() P.title(r"Bitmap-based") P.xlabel(r"Threshold level of $|E|$ ($\mathrm{J}$)") P.ylabel(r"Fraction of tumour beneath level") # Show a legend for the plot P.legend(loc=3) # Display the plot P.show(block=True) def plot_result(self): # Calculate preliminary relationships dofmap = self.DV.dofmap() cell_dofs = N.array([dofmap.cell_dofs(c)[0] for c in N.arange(self.mesh.num_cells()) if (self.subdomains[c] in
v.tissues["tumour"]["indices"])]) volumes = N.array([d.Cell(self.mesh, c).volume() for c in N.arange(self.mesh.num_cells()) if (self.subdomains[c] in v.tissues["tumour"]["indices"])]) # Create a horizontal axis cc_haxis = N.linspace(5000, 1e5, 200) # Calculate the tumour volume; this is what we will compare against tumour_volume = self.get_tumour_volume() # Initialize the output_arrays vector a rescale the x to V/cm output_arrays = [cc_haxis / 100] # Loop through the electrode pairs for i, triple in enumerate(v.electrode_triples): # Project the max e values for this triple to DG0 - this forces an evaluation of the function at the mid-point of each tet, DG0's only DOF e_dg = d.project(self.max_es[i], self.DV) # Calculate the "max e" contribution for each cell contributor = N.vectorize(lambda c: e_dg.vector()[c]) contributions = contributor(cell_dofs) # Sum the tet volumes for tets with a midpoint value greater than x, looping over x as e-norm thresholds (also scale to tumour volume) elim = N.vectorize(lambda x: volumes[contributions > x].sum() / tumour_volume) output_arrays.append(elim(cc_haxis)) # Compile into a convenient array output = N.array(zip(*output_arrays)) # Output cumulative coverage curves as CSV N.savetxt('results/%s-coverage_curves.csv' % input_mesh, output) # Plot the coverage curves for (anode, cathode, voltage), a in zip(v.electrode_triples, output_arrays[1:]): P.plot(output_arrays[0], a, label="%d - %d" % (anode, cathode)) # Draw the plot P.draw() P.xlabel(r"Threshold level of $|E|$ ($\mathrm{J}$)") P.ylabel(r"Fraction of tumour beneath level") # Show a legend for the plot P.legend(loc=3) # Display the plot P.savefig('%s-coverage_curves' % input_mesh)
mit
restudToolbox/package
development/testing/_modules/auxiliary_reliability.py
1
6085
from statsmodels.tools.eval_measures import rmse from copy import deepcopy import numpy as np import shlex import os from config import SPEC_DIR import respy def get_est_log_info(): """ Get the choice probabilities. """ with open('est.respy.info') as in_file: for line in in_file.readlines(): # Split line list_ = shlex.split(line) # Skip empty lines if len(list_) < 4: continue if list_[2] == 'Steps': num_steps = int(list_[3]) if list_[2] == 'Evaluations': num_evals = int(list_[3]) # Finishing return num_evals, num_steps def run(spec_dict, fname): """ Run a version of the Monte Carlo exercise. """ dirname = fname.replace('.ini', '') os.mkdir(dirname) os.chdir(dirname) # We first read in the first specification from the initial paper for our # baseline and process the deviations. respy_obj = respy.RespyCls(SPEC_DIR + fname) respy_obj.unlock() respy_obj.set_attr('file_est', '../truth/start/data.respy.dat') for key_ in spec_dict.keys(): respy_obj.set_attr(key_, spec_dict[key_]) if respy_obj.attr['num_procs'] > 1: respy_obj.set_attr('is_parallel', True) else: respy_obj.set_attr('is_parallel', False) respy_obj.lock() maxfun = respy_obj.get_attr('maxfun') # Let us first simulate a baseline sample, store the results for future # reference, and start an estimation from the true values. os.mkdir('truth') os.chdir('truth') respy_obj.write_out() simulate_specification(respy_obj, 'start', False) x, _ = respy.estimate(respy_obj) simulate_specification(respy_obj, 'stop', True, x) rmse_start, rmse_stop = get_rmse() num_evals, num_steps = get_est_log_info() os.chdir('../') record_results('Truth', rmse_start, rmse_stop, num_evals, num_steps, maxfun) # Now we will estimate a misspecified model on this dataset assuming that # agents are myopic. This will serve as a form of well behaved starting # values for the real estimation to follow. respy_obj.unlock() respy_obj.set_attr('delta', 0.00) respy_obj.lock() os.mkdir('static') os.chdir('static') respy_obj.write_out() simulate_specification(respy_obj, 'start', False) x, _ = respy.estimate(respy_obj) simulate_specification(respy_obj, 'stop', True, x) rmse_start, rmse_stop = get_rmse() num_evals, num_steps = get_est_log_info() os.chdir('../') record_results('Static', rmse_start, rmse_stop, num_evals, num_steps, maxfun) # # Using the results from the misspecified model as starting values, we see # # whether we can obtain the initial values. respy_obj.update_model_paras(x) respy_obj.unlock() respy_obj.set_attr('delta', 0.95) respy_obj.lock() os.mkdir('dynamic') os.chdir('dynamic') respy_obj.write_out() simulate_specification(respy_obj, 'start', False) x, _ = respy.estimate(respy_obj) simulate_specification(respy_obj, 'stop', True, x) rmse_start, rmse_stop = get_rmse() num_evals, num_steps = get_est_log_info() os.chdir('../') record_results('Dynamic', rmse_start, rmse_stop, num_evals, num_steps, maxfun) os.chdir('../') def get_choice_probabilities(fname, is_flatten=True): """ Get the choice probabilities. """ # Initialize container. stats = np.tile(np.nan, (0, 4)) with open(fname) as in_file: for line in in_file.readlines(): # Split line list_ = shlex.split(line) # Skip empty lines if not list_: continue # If OUTCOMES is reached, then we are done for good. if list_[0] == 'Outcomes': break # Any lines that do not have an integer as their first element # are not of interest. try: int(list_[0]) except ValueError: continue # All lines that make it down here are relevant. 
stats = np.vstack((stats, [float(x) for x in list_[1:]])) # Return all statistics as a flattened array. if is_flatten: stats = stats.flatten() # Finishing return stats def record_results(label, rmse_start, rmse_stop, num_evals, num_steps, maxfun): with open('reliability.respy.info', 'a') as out_file: # Setting up if label == 'Truth': out_file.write('\n RMSE\n\n') fmt = '{:>15} {:>15} {:>15} {:>15} {:>15}\n\n' out_file.write(fmt.format(*['Setup', 'Start', 'Stop', 'Evals', 'Steps'])) fmt = '{:>15} {:15.10f} {:15.10f} {:15} {:15}\n' out_file.write(fmt.format(*[label, rmse_start, rmse_stop, num_evals, num_steps])) # Add information on maximum allowed evaluations if label == 'Dynamic': fmt = '\n{:>15} {:<15} {:15}\n' out_file.write(fmt.format(*['Maximum', 'Evaluations', maxfun])) def get_rmse(): """ Compute the RMSE based on the relevant parameterization. """ fname = '../truth/start/data.respy.info' probs_true = get_choice_probabilities(fname, is_flatten=True) fname = 'start/data.respy.info' probs_start = get_choice_probabilities(fname, is_flatten=True) fname = 'stop/data.respy.info' probs_stop = get_choice_probabilities(fname, is_flatten=True) rmse_stop = rmse(probs_stop, probs_true) rmse_start = rmse(probs_start, probs_true) return rmse_start, rmse_stop def simulate_specification(respy_obj, subdir, update, paras=None): """ Simulate results to assess the estimation performance. Note that we do not update the object that is passed in. """ os.mkdir(subdir) os.chdir(subdir) respy_copy = deepcopy(respy_obj) if update: assert (paras is not None) respy_copy.update_model_paras(paras) respy_copy.write_out() respy.simulate(respy_copy) os.chdir('../')
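# --- Illustrative sketch (not part of the original file) ---
# get_rmse() above boils down to flattening the true, start and stop
# choice-probability matrices and handing them to statsmodels' rmse.
# A self-contained toy with made-up probability vectors:
import numpy as np
from statsmodels.tools.eval_measures import rmse

probs_true = np.array([0.25, 0.25, 0.25, 0.25])
probs_start = np.array([0.40, 0.20, 0.20, 0.20])
probs_stop = np.array([0.26, 0.24, 0.25, 0.25])

# Start values sit far from the truth; the optimized (stop) values
# should sit much closer, which is what the reliability table records
print(rmse(probs_start, probs_true), rmse(probs_stop, probs_true))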
mit
yesho/MITMf
core/proxyplugins.py
13
4292
# Copyright (c) 2010-2011 Ben Schmidt, Marcello Salvati # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA # import sys import logging import inspect import traceback from core.logger import logger formatter = logging.Formatter("%(asctime)s [ProxyPlugins] %(message)s", datefmt="%Y-%m-%d %H:%M:%S") log = logger().setup_logger("ProxyPlugins", formatter) class ProxyPlugins: ''' This class does some magic so that all we need to do in ServerConnection is do a self.plugins.hook() call and we will call any plugin that implements the function that it came from with the args passed to the original function. To do this, we are probably abusing the inspect module, and if it turns out to be too slow it can be changed. For now, it's nice because it makes for very little code needed to tie us in. Sadly, propagating changes back to the function is not quite as easy in all cases :-/ . Right now, changes to local function vars still have to be set back in the function. This only happens in handleResponse, but is still annoying. ''' mthdDict = {"connectionMade" : "request", "handleStatus" : "responsestatus", "handleResponse" : "response", "handleHeader" : "responseheaders", "handleEndHeaders": "responseheaders"} plugin_mthds = {} plugin_list = [] all_plugins = [] __shared_state = {} def __init__(self): self.__dict__ = self.__shared_state def set_plugins(self, plugins): '''Set the plugins in use''' for p in plugins: self.add_plugin(p) log.debug("Loaded {} plugin/s".format(len(plugins))) def add_plugin(self,p): '''Load a plugin''' self.plugin_list.append(p) log.debug("Adding {} plugin".format(p.name)) for mthd,pmthd in self.mthdDict.iteritems(): try: self.plugin_mthds[mthd].append(getattr(p,pmthd)) except KeyError: self.plugin_mthds[mthd] = [getattr(p,pmthd)] def remove_plugin(self,p): '''Unload a plugin''' self.plugin_list.remove(p) log.debug("Removing {} plugin".format(p.name)) for mthd,pmthd in self.mthdDict.iteritems(): self.plugin_mthds[mthd].remove(getattr(p,pmthd)) def hook(self): '''Magic to hook various function calls in sslstrip''' #gets the function name and args of our caller frame = sys._getframe(1) fname = frame.f_code.co_name keys,_,_,values = inspect.getargvalues(frame) #assumes that no one calls del on an arg :-/ args = {} for key in keys: args[key] = values[key] #prevent self conflict if (fname == "handleResponse") or (fname == "handleHeader") or (fname == "handleEndHeaders"): args['request'] = args['self'] args['response'] = args['self'].client else: args['request'] = args['self'] del args['self'] log.debug("hooking {}()".format(fname)) #calls any plugin that has this hook try: if self.plugin_mthds: for f in self.plugin_mthds[fname]: a = f(**args) if a != None: args = a except Exception as e: #This is needed because errors in hooked functions won't raise an Exception + Traceback (which can be infuriating) log.error("Exception occurred in hooked function")
traceback.print_exc() #pass our changes to the locals back down return args
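# --- Illustrative sketch (not part of the original file) ---
# The essence of hook(): a callee recovers its caller's function name
# and arguments through the caller's frame object. handleResponse()
# below is a hypothetical stand-in for the sslstrip methods listed in
# mthdDict; only sys._getframe and inspect.getargvalues carry the trick.
import sys
import inspect

def hook():
    frame = sys._getframe(1)
    fname = frame.f_code.co_name
    keys, _, _, values = inspect.getargvalues(frame)
    return fname, dict((key, values[key]) for key in keys)

def handleResponse(self, data):
    return hook()

print(handleResponse("fake-self", "<html></html>"))
# -> ('handleResponse', {'self': 'fake-self', 'data': '<html></html>'})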
gpl-3.0
kracwarlock/Lasagne
lasagne/tests/layers/test_conv.py
9
18394
import numpy as np import pytest import importlib import theano import lasagne from lasagne.utils import floatX def conv2d(input, kernel, border_mode): output = np.zeros((input.shape[0], kernel.shape[0], input.shape[2] + kernel.shape[2] - 1, input.shape[3] + kernel.shape[3] - 1, )) for i in range(kernel.shape[2]): for j in range(kernel.shape[3]): k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis] output[:, :, i:i + input.shape[2], j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2) if border_mode == 'valid': trim = (kernel.shape[2] - 1, kernel.shape[3] - 1) output = output[:, :, trim[0]:-trim[0], trim[1]:-trim[1]] elif border_mode == 'same': shift_x = (kernel.shape[2] - 1) // 2 shift_y = (kernel.shape[3] - 1) // 2 output = output[:, :, shift_x:input.shape[2] + shift_x, shift_y:input.shape[3] + shift_y] return output def conv2d_test_sets(): def _convert(input, kernel, output, kwargs): return [theano.shared(floatX(input)), floatX(kernel), output, kwargs] for border_mode in ['valid', 'full', 'same']: for stride in [1, 2, 3]: input = np.random.random((3, 1, 16, 23)) kernel = np.random.random((16, 1, 3, 3)) output = conv2d(input, kernel, border_mode=border_mode) output = output[:, :, ::stride, ::stride] yield _convert(input, kernel, output, {'border_mode': border_mode, 'stride': stride }) input = np.random.random((3, 3, 16, 23)) kernel = np.random.random((16, 3, 3, 3)) output = conv2d(input, kernel, border_mode=border_mode) output = output[:, :, ::stride, ::stride] yield _convert(input, kernel, output, {'border_mode': border_mode, 'stride': stride }) # bias-less case input = np.random.random((3, 1, 16, 23)) kernel = np.random.random((16, 1, 3, 3)) output = conv2d(input, kernel, border_mode='valid') yield _convert(input, kernel, output, {'b': None}) def conv1d(input, kernel, border_mode='valid'): output = [] for b in input: temp = [] for c in kernel: temp.append( np.convolve(b[0, :], c[0, :], mode=border_mode)) output.append(temp) return np.array(output) def conv1d_test_sets(): def _convert(input, kernel, output, kwargs): return [theano.shared(floatX(input)), floatX(kernel), output, kwargs] for border_mode in ['valid', 'full', 'same']: for stride in [1, 2, 3]: input = np.random.random((3, 1, 23)) kernel = np.random.random((16, 1, 3)) output = conv1d(input, kernel, border_mode) output = output[:, :, ::stride] yield _convert(input, kernel, output, {'border_mode': border_mode, 'stride': stride, }) # bias-less case input = np.random.random((3, 1, 23)) kernel = np.random.random((16, 1, 3)) output = conv1d(input, kernel, border_mode='valid') yield _convert(input, kernel, output, {'b': None}) def test_conv_output_length(): from lasagne.layers.conv import conv_output_length assert conv_output_length(13, 5, 3, 'valid', 2) == 3 assert conv_output_length(13, 5, 3, 'full', 2) == 6 assert conv_output_length(13, 5, 3, 'same', 2) == 5 assert conv_output_length(13, 5, 3, 'pad', 2) == 5 with pytest.raises(ValueError) as exc: conv_output_length(13, 5, 3, '_nonexistent_mode', 2) assert "Invalid border mode" in exc.value.args[0] @pytest.fixture def DummyInputLayer(): def factory(shape): from lasagne.layers.input import InputLayer return InputLayer(shape) return factory class TestConv1DLayer: @pytest.mark.parametrize( "input, kernel, output, kwargs", list(conv1d_test_sets())) @pytest.mark.parametrize("extra_kwargs", [ {}, {'untie_biases': True}, ]) def test_defaults(self, DummyInputLayer, input, kernel, output, kwargs, extra_kwargs): kwargs.update(extra_kwargs) b, c, w = input.shape.eval() input_layer = 
DummyInputLayer((b, c, w)) try: from lasagne.layers.conv import Conv1DLayer layer = Conv1DLayer( input_layer, num_filters=kernel.shape[0], filter_size=kernel.shape[2], W=kernel, **kwargs ) actual = layer.get_output_for(input).eval() assert actual.shape == output.shape assert actual.shape == layer.output_shape assert np.allclose(actual, output) except NotImplementedError: pass def test_init_none_nonlinearity_bias(self, DummyInputLayer): from lasagne.layers.conv import Conv1DLayer input_layer = DummyInputLayer((1, 2, 3)) layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,), nonlinearity=None, b=None) assert layer.nonlinearity == lasagne.nonlinearities.identity assert layer.b is None def test_invalid_border_mode(self, DummyInputLayer): from lasagne.layers.conv import Conv1DLayer input_layer = DummyInputLayer((1, 2, 3)) with pytest.raises(RuntimeError) as exc: layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,), border_mode='_nonexistent_mode') assert "Invalid border mode" in exc.value.args[0] class TestConv2DLayerImplementations: @pytest.fixture( params=[ ('lasagne.layers', 'Conv2DLayer', {}), ('lasagne.layers.cuda_convnet', 'Conv2DCCLayer', {'flip_filters': True}), ('lasagne.layers.corrmm', 'Conv2DMMLayer', {'flip_filters': True}), ('lasagne.layers.dnn', 'Conv2DDNNLayer', {'flip_filters': True}), ], ) def Conv2DImpl(self, request): impl_module_name, impl_name, impl_default_kwargs = request.param try: mod = importlib.import_module(impl_module_name) except ImportError: pytest.skip("{} not available".format(impl_module_name)) impl = getattr(mod, impl_name) def wrapper(*args, **kwargs): kwargs2 = impl_default_kwargs.copy() kwargs2.update(kwargs) return impl(*args, **kwargs2) wrapper.__name__ = impl_name return wrapper @pytest.mark.parametrize( "input, kernel, output, kwargs", list(conv2d_test_sets())) @pytest.mark.parametrize("extra_kwargs", [ {}, {'untie_biases': True}, ]) def test_defaults(self, Conv2DImpl, DummyInputLayer, input, kernel, output, kwargs, extra_kwargs): kwargs.update(extra_kwargs) b, c, h, w = input.shape.eval() input_layer = DummyInputLayer((b, c, h, w)) try: layer = Conv2DImpl( input_layer, num_filters=kernel.shape[0], filter_size=kernel.shape[2:], W=kernel, **kwargs ) actual = layer.get_output_for(input).eval() assert actual.shape == output.shape assert actual.shape == layer.output_shape assert np.allclose(actual, output) except NotImplementedError: pytest.skip() @pytest.mark.parametrize( "input, kernel, output, kwargs", list(conv2d_test_sets())) def test_with_nones(self, Conv2DImpl, DummyInputLayer, input, kernel, output, kwargs): b, c, h, w = input.shape.eval() input_layer = DummyInputLayer((None, c, None, None)) try: layer = Conv2DImpl( input_layer, num_filters=kernel.shape[0], filter_size=kernel.shape[2:], W=kernel, **kwargs ) actual = layer.get_output_for(input).eval() assert layer.output_shape == (None, kernel.shape[0], None, None) assert actual.shape == output.shape assert np.allclose(actual, output) except NotImplementedError: pytest.skip() def test_init_none_nonlinearity_bias(self, Conv2DImpl, DummyInputLayer): input_layer = DummyInputLayer((1, 2, 3, 3)) layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3), nonlinearity=None, b=None) assert layer.nonlinearity == lasagne.nonlinearities.identity assert layer.b is None def test_invalid_border_mode(self, Conv2DImpl, DummyInputLayer): input_layer = DummyInputLayer((1, 2, 3)) with pytest.raises(RuntimeError) as exc: layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 
3), border_mode='_nonexistent_mode') assert "Invalid border mode" in exc.value.args[0] def test_get_params(self, Conv2DImpl, DummyInputLayer): input_layer = DummyInputLayer((128, 3, 32, 32)) layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3)) assert layer.get_params() == [layer.W, layer.b] assert layer.get_params(regularizable=False) == [layer.b] assert layer.get_params(regularizable=True) == [layer.W] assert layer.get_params(trainable=True) == [layer.W, layer.b] assert layer.get_params(trainable=False) == [] assert layer.get_params(_nonexistent_tag=True) == [] assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b] class TestConv2DDNNLayer: def test_import_without_gpu_or_cudnn_raises(self): from theano.sandbox.cuda import dnn if theano.config.device.startswith("gpu") and dnn.dnn_available(): pytest.skip() else: with pytest.raises(ImportError): import lasagne.layers.dnn def test_pad(self, DummyInputLayer): try: from lasagne.layers.dnn import Conv2DDNNLayer except ImportError: pytest.skip("dnn not available") input_layer = DummyInputLayer((1, 2, 3, 3)) with pytest.raises(RuntimeError) as exc: layer = Conv2DDNNLayer(input_layer, num_filters=1, filter_size=(3, 3), border_mode='valid', pad=(1, 1)) assert ("You cannot specify both 'border_mode' and 'pad'" in exc.value.args[0]) layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3), pad=(3, 3)) assert layer.output_shape == (1, 4, 7, 7) class TestConv2DMMLayer: def test_import_without_gpu_raises(self): if theano.config.device.startswith("gpu"): pytest.skip() else: with pytest.raises(ImportError): import lasagne.layers.corrmm def test_pad(self, DummyInputLayer): try: from lasagne.layers.corrmm import Conv2DMMLayer except ImportError: pytest.skip("corrmm not available") input_layer = DummyInputLayer((1, 2, 3, 3)) with pytest.raises(RuntimeError) as exc: layer = Conv2DMMLayer(input_layer, num_filters=1, filter_size=(3, 3), border_mode='valid', pad=(1, 1)) assert ("You cannot specify both 'border_mode' and 'pad'" in exc.value.args[0]) layer = Conv2DMMLayer(input_layer, num_filters=4, filter_size=(3, 3), pad=(3, 3)) assert layer.output_shape == (1, 4, 7, 7) class TestConv2DCCLayer: def test_import_without_gpu_raises(self): if theano.config.device.startswith("gpu"): pytest.skip() else: with pytest.raises(ImportError): import lasagne.layers.cuda_convnet def test_unsupported_settings(self, DummyInputLayer): try: from lasagne.layers.cuda_convnet import Conv2DCCLayer except ImportError: pytest.skip("cuda_convnet not available") input_layer = DummyInputLayer((128, 3, 32, 32)) with pytest.raises(RuntimeError) as exc: layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 5)) assert ("Conv2DCCLayer only supports square filters" in exc.value.args[0]) with pytest.raises(RuntimeError) as exc: layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), stride=(1, 2)) assert ("Conv2DCCLayer only supports square strides" in exc.value.args[0]) with pytest.raises(RuntimeError) as exc: layer = Conv2DCCLayer(input_layer, num_filters=15, filter_size=(3, 3)) assert ("Conv2DCCLayer requires num_filters to be a multiple of 16" in exc.value.args[0]) with pytest.raises(RuntimeError) as exc: layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), pad=(1, 2)) assert ("Conv2DCCLayer only supports square padding" in exc.value.args[0]) input_layer = DummyInputLayer((128, 7, 32, 32)) with pytest.raises(RuntimeError) as exc: layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3)) 
assert ("Conv2DCCLayer requires the number of input channels to be " "1, 2, 3 or a multiple of 4" in exc.value.args[0]) def test_pad(self, DummyInputLayer): try: from lasagne.layers.cuda_convnet import Conv2DCCLayer except ImportError: pytest.skip("cuda_convnet not available") input_layer = DummyInputLayer((128, 3, 32, 32)) with pytest.raises(RuntimeError) as exc: layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), border_mode='valid', pad=(1, 1)) assert ("You cannot specify both 'border_mode' and 'pad'" in exc.value.args[0]) layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), pad=(3, 3)) assert layer.output_shape == (128, 16, 36, 36) def test_dimshuffle_false_shapes(self, DummyInputLayer): try: from lasagne.layers.cuda_convnet import Conv2DCCLayer except ImportError: pytest.skip("cuda_convnet not available") input_layer = DummyInputLayer((4, 32, 32, 128)) # c01b instead of bc01 layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), dimshuffle=False) assert layer.W.get_value().shape == (4, 3, 3, 16) assert layer.b.get_value().shape == (16,) layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), dimshuffle=False, untie_biases=True) assert layer.W.get_value().shape == (4, 3, 3, 16) assert layer.b.get_value().shape == (16, 30, 30) def test_dimshuffle_false_get_output_for(self, DummyInputLayer): try: from lasagne.layers.cuda_convnet import Conv2DCCLayer except ImportError: pytest.skip("cuda_convnet not available") # this implementation is tested against FilterActs instead of # theano.tensor.nnet.conv.conv2d because using the latter leads to # numerical precision errors. from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs filter_acts = FilterActs(stride=1, pad=0, partial_sum=1) input = theano.shared(floatX(np.random.random((4, 5, 5, 8)))) kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16)))) input_layer = DummyInputLayer((4, 5, 5, 8)) # c01b instead of bc01 layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3), dimshuffle=False, W=kernel, b=None, nonlinearity=None) output = np.array(filter_acts(input, kernel).eval()) actual = layer.get_output_for(input).eval() actual = np.array(actual) assert actual.shape == output.shape assert actual.shape == layer.output_shape assert np.allclose(actual, output) class TestShuffleLayers: def test_bc01_to_c01b(self): from lasagne.layers.input import InputLayer try: from lasagne.layers.cuda_convnet import ShuffleBC01ToC01BLayer except ImportError: pytest.skip("cuda_convnet not available") input_layer = InputLayer((1, 2, 3, 4)) layer = ShuffleBC01ToC01BLayer(input_layer) assert layer.output_shape == (2, 3, 4, 1) input = floatX(np.random.random((1, 2, 3, 4))) output = input.transpose(1, 2, 3, 0) actual = layer.get_output_for(theano.shared(input)).eval() assert np.allclose(output, actual) def test_c01b_to_bc01(self): from lasagne.layers.input import InputLayer try: from lasagne.layers.cuda_convnet import ShuffleC01BToBC01Layer except ImportError: pytest.skip("cuda_convnet not available") input_layer = InputLayer((1, 2, 3, 4)) layer = ShuffleC01BToBC01Layer(input_layer) assert layer.output_shape == (4, 1, 2, 3) input = floatX(np.random.random((1, 2, 3, 4))) output = input.transpose(3, 0, 1, 2) actual = layer.get_output_for(theano.shared(input)).eval() assert np.allclose(output, actual)
mit
UdjinM6/dash
test/functional/feature_help.py
2
1909
#!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Verify that starting dashd with -h works as expected.""" import subprocess from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class HelpTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def setup_network(self): self.add_nodes(self.num_nodes) # Don't start the node def run_test(self): self.log.info("Start dashd with -h for help text") self.nodes[0].start(extra_args=['-h'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) # Node should exit immediately and output help to stdout. ret_code = self.nodes[0].process.wait(timeout=1) assert_equal(ret_code, 0) output = self.nodes[0].process.stdout.read() assert b'Options' in output self.log.info("Help text received: {} (...)".format(output[0:60])) self.nodes[0].running = False self.log.info("Start dashd with -version for version information") self.nodes[0].start(extra_args=['-version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) # Node should exit immediately and output version to stdout. ret_code = self.nodes[0].process.wait(timeout=1) assert_equal(ret_code, 0) output = self.nodes[0].process.stdout.read() assert b'version' in output self.log.info("Version text received: {} (...)".format(output[0:60])) # Clean up TestNode state self.nodes[0].running = False self.nodes[0].process = None self.nodes[0].rpc_connected = False self.nodes[0].rpc = None if __name__ == '__main__': HelpTest().main()
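# --- Illustrative sketch (not part of the original file) ---
# The same start/-h/wait/inspect-stdout pattern, reduced to a plain
# subprocess call against the Python interpreter itself, so it runs
# anywhere without a dashd binary:
import subprocess
import sys

proc = subprocess.Popen([sys.executable, '--help'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = proc.communicate(timeout=5)
assert proc.returncode == 0
assert b'usage' in stdout.lower()
print("Help text received: {} (...)".format(stdout[0:60]))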
mit
richardcs/ansible
lib/ansible/utils/module_docs_fragments/dimensiondata_wait.py
192
1429
# -*- coding: utf-8 -*- # # Copyright (c) 2016 Dimension Data # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. # # Authors: # - Adam Friedman <[email protected]> class ModuleDocFragment(object): # Dimension Data ("wait-for-completion" parameters) doc fragment DOCUMENTATION = ''' options: wait: description: - Should we wait for the task to complete before moving onto the next. required: false default: false wait_time: description: - The maximum amount of time (in seconds) to wait for the task to complete. - Only applicable if I(wait=true). required: false default: 600 wait_poll_interval: description: - The amount of time (in seconds) to wait between checks for task completion. - Only applicable if I(wait=true). required: false default: 2 '''
gpl-3.0
Perferom/android_kernel_lge_msm7x27-3.0.x
scripts/build-all.py
1250
9474
#! /usr/bin/env python # Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Code Aurora nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. import errno import glob from optparse import OptionParser import subprocess import os import os.path import shutil import sys version = 'build-all.py, version 0.01' build_dir = '../all-kernels' make_command = ["vmlinux", "modules"] make_env = os.environ make_env.update({ 'ARCH': 'arm', 'CROSS_COMPILE': 'arm-none-linux-gnueabi-', 'KCONFIG_NOTIMESTAMP': 'true' }) all_options = {} def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) defconfig = open(file, 'a') defconfig.write(str + '\n') defconfig.close() def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = {} for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/qsd*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/apq*_defconfig'): names[os.path.basename(n)[:-10]] = n return names class Builder: def __init__(self, logname): self.logname = logname self.fd = open(logname, 'w') def run(self, args): devnull = open('/dev/null', 'r') proc = subprocess.Popen(args, stdin=devnull, env=make_env, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) count = 0 # for line in proc.stdout: rawfd = proc.stdout.fileno() while True: line = os.read(rawfd, 1024) if not line: break
self.fd.write(line) self.fd.flush() if all_options.verbose: sys.stdout.write(line) sys.stdout.flush() else: for i in range(line.count('\n')): count += 1 if count == 64: count = 0 print sys.stdout.write('.') sys.stdout.flush() print result = proc.wait() self.fd.close() return result failed_targets = [] def build(target): dest_dir = os.path.join(build_dir, target) log_name = '%s/log-%s.log' % (build_dir, target) print 'Building %s in %s log %s' % (target, dest_dir, log_name) if not os.path.isdir(dest_dir): os.mkdir(dest_dir) defconfig = 'arch/arm/configs/%s_defconfig' % target dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir shutil.copyfile(defconfig, dotconfig) devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, '%s_defconfig' % target], env=make_env, stdin=devnull) devnull.close() if not all_options.updateconfigs: build = Builder(log_name) result = build.run(['make', 'O=%s' % dest_dir] + make_command) if result != 0: if all_options.keep_going: failed_targets.append(target) fail_or_error = error else: fail_or_error = fail fail_or_error("Failed to build %s, see %s" % (target, build.logname)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=make_env, stdin=devnull) devnull.close() shutil.copyfile(savedefconfig, defconfig) def build_many(allconf, targets): print "Building %d target(s)" % len(targets) for target in targets: if all_options.updateconfigs: update_config(allconf[target], all_options.updateconfigs) build(target) if failed_targets: fail('\n '.join(["Failed targets:"] + [target for target in failed_targets])) def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... -- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. 
--updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs.keys(): print " %s" % target sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if options.jobs: make_command.append("-j%d" % options.jobs) if options.load_average: make_command.append("-l%d" % options.load_average) if args == ['all']: build_many(configs, configs.keys()) elif args == ['perf']: targets = [] for t in configs.keys(): if "perf" in t: targets.append(t) build_many(configs, targets) elif args == ['noperf']: targets = [] for t in configs.keys(): if "perf" not in t: targets.append(t) build_many(configs, targets) elif len(args) > 0: targets = [] for t in args: if t not in configs.keys(): parser.error("Target '%s' not one of %s" % (t, configs.keys())) targets.append(t) build_many(configs, targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
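# --- Illustrative sketch (not part of the original file) ---
# Builder.run() above reads the pipe's raw file descriptor with
# os.read() instead of iterating lines, so output streams through as it
# arrives rather than after Python's internal buffering fills. A
# minimal standalone version of that loop, using a POSIX 'echo' as a
# stand-in for the make invocation:
import os
import subprocess
import sys

proc = subprocess.Popen(['echo', 'hello world'], stdout=subprocess.PIPE)
rawfd = proc.stdout.fileno()
while True:
    chunk = os.read(rawfd, 1024)
    if not chunk:
        break
    sys.stdout.write(chunk.decode('utf-8', 'replace'))
sys.stdout.flush()
proc.wait()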
gpl-2.0
level420/iconfont
generate.py
1
4953
#!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################################ # # qooxdoo - the new era of web development # # http://qooxdoo.org # # Copyright: # 2008 - 2012 1&1 Internet AG, Germany, http://www.1und1.de # # License: # MIT: https://opensource.org/licenses/MIT # See the LICENSE file in the project's top-level directory for details. # # Authors: # * Thomas Herchenroeder (thron7) # ################################################################################ ## # This is a stub proxy for the real generator.py ## import sys, os, re, subprocess, codecs, optparse CMD_PYTHON = sys.executable QOOXDOO_PATH = '../qooxdoo-master' QX_PYLIB = "tool/pylib" ## # A derived OptionParser class that ignores unknown options (The parent # class raises in those cases, and stops further processing). # We need this, as we are only interested in -c/--config on this level, and # want to ignore pot. other options. # class IgnoringUnknownOptionParser(optparse.OptionParser): ## # <rargs> is the raw argument list. The original _process_args mutates # rargs, processing options into <values> and copying interspersed args # into <largs>. This overridden version ignores unknown or ambiguous # options. def _process_args(self, largs, rargs, values): while rargs: try: optparse.OptionParser._process_args(self, largs, rargs, values) except (optparse.BadOptionError, optparse.AmbiguousOptionError): pass def parseArgs(): parser = IgnoringUnknownOptionParser(add_help_option=False) parser.add_option( "-c", "--config", dest="config", metavar="CFGFILE", default="config.json", help="path to configuration file" ) parser.add_option( "-v", "--verbose", dest="verbose", action="store_true", default=False, help="run in verbose mode" ) (options, args) = parser.parse_args(sys.argv[1:]) return options, args ShellOptions, ShellArgs = parseArgs() # this is from misc.json, duplicated for decoupling _eolComment = re.compile(r'(?<![a-zA-Z]:)//.*$', re.M) # double $ for string.Template _mulComment = re.compile(r'/\*.*?\*/', re.S) def stripComments(s): b = _eolComment.sub('',s) b = _mulComment.sub('',b) return b def getQxPath(): path = QOOXDOO_PATH # OS env takes precedence if os.environ.has_key("QOOXDOO_PATH"): path = os.environ["QOOXDOO_PATH"] # else use QOOXDOO_PATH from config.json else: config_file = ShellOptions.config if os.path.exists(config_file): # try json parsing with qx json if not path.startswith('${'): # template macro has been resolved sys.path.insert(0, os.path.join(path, QX_PYLIB)) try: from misc import json got_json = True except: got_json = False got_path = False if got_json: config_str = codecs.open(config_file, "r", "utf-8").read() #config_str = stripComments(config_str) # not necessary under demjson config = json.loads(config_str) p = config.get("let") if p: p = p.get("QOOXDOO_PATH") if p: path = p got_path = True # regex parsing - error prone if not got_path: qpathr=re.compile(r'"QOOXDOO_PATH"\s*:\s*"([^"]*)"\s*,?') conffile = codecs.open(config_file, "r", "utf-8") aconffile = conffile.readlines() for line in aconffile: mo = qpathr.search(line) if mo: path = mo.group(1) break # assume first occurrence is ok path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), path)) return path os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # switch to skeleton dir qxpath = getQxPath() REAL_GENERATOR = os.path.join(qxpath, 'tool', 'bin', 'generator.py') if not os.path.exists(REAL_GENERATOR): print "Cannot find real generator script 
under: \"%s\"; aborting" % REAL_GENERATOR sys.exit(1) elif ShellOptions.verbose: print "\nInvoking real generator under %s ..." % REAL_GENERATOR argList = [] argList.append(CMD_PYTHON) argList.append(REAL_GENERATOR) argList.extend(sys.argv[1:]) if sys.platform == "win32": argList1=[] for arg in argList: if arg.find(' ')>-1: argList1.append('"%s"' % arg) else: argList1.append(arg) argList = argList1 else: argList = ['"%s"' % x for x in argList] # quote argv elements cmd = " ".join(argList) try: retval = subprocess.call(cmd, shell=True) except: retval = 3 sys.exit(retval)
mit
pitch-sands/i-MPI
flask/Lib/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py
17
15432
# postgresql/psycopg2.py # Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Support for the PostgreSQL database via the psycopg2 driver. Driver ------ The psycopg2 driver is available at http://pypi.python.org/pypi/psycopg2/ . The dialect has several behaviors which are specifically tailored towards compatibility with this module. Note that psycopg1 is **not** supported. Connecting ---------- URLs are of the form ``postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]``. psycopg2-specific keyword arguments which are accepted by :func:`.create_engine()` are: * *server_side_cursors* - Enable the usage of "server side cursors" for SQL statements which support this feature. What this essentially means from a psycopg2 point of view is that the cursor is created using a name, e.g. ``connection.cursor('some name')``, which has the effect that result rows are not immediately pre-fetched and buffered after statement execution, but are instead left on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.base.ResultProxy` uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows at a time are fetched over the wire to reduce conversational overhead. Note that the ``stream_results=True`` execution option is a more targeted way of enabling this mode on a per-execution basis. * *use_native_unicode* - Enable the usage of Psycopg2 "native unicode" mode per connection. True by default. Unix Domain Connections ------------------------ psycopg2 supports connecting via Unix domain connections. When the ``host`` portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2, which specifies Unix-domain communication rather than TCP/IP communication:: create_engine("postgresql+psycopg2://user:password@/dbname") By default, the socket file used is to connect to a Unix-domain socket in ``/tmp``, or whatever socket directory was specified when PostgreSQL was built. This value can be overridden by passing a pathname to psycopg2, using ``host`` as an additional keyword argument:: create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql") See also: `PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_ Per-Statement/Connection Execution Options ------------------------------------------- The following DBAPI-specific options are respected when used with :meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`, :meth:`.Query.execution_options`, in addition to those not specific to DBAPIs: * isolation_level - Set the transaction isolation level for the lifespan of a :class:`.Connection` (can only be set on a connection, not a statement or query). This includes the options ``SERIALIZABLE``, ``READ COMMITTED``, ``READ UNCOMMITTED`` and ``REPEATABLE READ``. * stream_results - Enable or disable usage of server side cursors. If ``None`` or not set, the ``server_side_cursors`` option of the :class:`.Engine` is used. Unicode ------- By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` extension, such that the DBAPI receives and returns all strings as Python Unicode objects directly - SQLAlchemy passes these values through without change. 
Psycopg2 here will encode/decode string values based on the current "client encoding" setting; by default this is the value in the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. Typically, this can be changed to ``utf-8``, as a more useful default:: #client_encoding = sql_ascii # actually, defaults to database # encoding client_encoding = utf8 A second way to affect the client encoding is to set it within Psycopg2 locally. SQLAlchemy will call psycopg2's ``set_client_encoding()`` method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding) on all new connections based on the value passed to :func:`.create_engine` using the ``client_encoding`` parameter:: engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8') This overrides the encoding specified in the Postgresql client configuration. .. versionadded:: 0.7.3 The psycopg2-specific ``client_encoding`` parameter to :func:`.create_engine`. SQLAlchemy can also be instructed to skip the usage of the psycopg2 ``UNICODE`` extension and to instead utilize its own unicode encode/decode services, which are normally reserved only for those DBAPIs that don't fully support unicode directly. Passing ``use_native_unicode=False`` to :func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``. SQLAlchemy will instead encode data itself into Python bytestrings on the way in and coerce from bytes on the way back, using the value of the :func:`.create_engine` ``encoding`` parameter, which defaults to ``utf-8``. SQLAlchemy's own unicode encode/decode functionality is steadily becoming obsolete as more DBAPIs support unicode fully along with the approach of Python 3; in modern usage psycopg2 should be relied upon to handle unicode. Transactions ------------ The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. .. _psycopg2_isolation: Transaction Isolation Level --------------------------- The ``isolation_level`` parameter of :func:`.create_engine` here makes use of psycopg2's ``set_isolation_level()`` connection method, rather than issuing a ``SET SESSION CHARACTERISTICS`` command. This is because psycopg2 resets the isolation level on each new transaction, and needs to know at the API level what level should be used.
NOTICE logging --------------- The psycopg2 dialect will log Postgresql NOTICE messages via the ``sqlalchemy.dialects.postgresql`` logger:: import logging logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) """ import re import logging from sqlalchemy import util, exc from sqlalchemy.util.compat import decimal from sqlalchemy import processors from sqlalchemy.engine import base from sqlalchemy.sql import expression from sqlalchemy import types as sqltypes from sqlalchemy.dialects.postgresql.base import PGDialect, PGCompiler, \ PGIdentifierPreparer, PGExecutionContext, \ ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\ _INT_TYPES logger = logging.getLogger('sqlalchemy.dialects.postgresql') class _PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory(decimal.Decimal) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # pg8000 returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) else: if coltype in _FLOAT_TYPES: # pg8000 returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) class _PGEnum(ENUM): def __init__(self, *arg, **kw): super(_PGEnum, self).__init__(*arg, **kw) # Py2K if self.convert_unicode: self.convert_unicode = "force" # end Py2K class _PGArray(ARRAY): def __init__(self, *arg, **kw): super(_PGArray, self).__init__(*arg, **kw) # Py2K # FIXME: this check won't work for setups that # have convert_unicode only on their create_engine(). if isinstance(self.item_type, sqltypes.String) and \ self.item_type.convert_unicode: self.item_type.convert_unicode = "force" # end Py2K # When we're handed literal SQL, ensure it's a SELECT-query. Since # 8.3, combining cursors and "FOR UPDATE" has been fine. 
SERVER_SIDE_CURSOR_RE = re.compile( r'\s*SELECT', re.I | re.UNICODE) _server_side_id = util.counter() class PGExecutionContext_psycopg2(PGExecutionContext): def create_cursor(self): # TODO: coverage for server side cursors + select.for_update() if self.dialect.server_side_cursors: is_server_side = \ self.execution_options.get('stream_results', True) and ( (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \ or \ ( (not self.compiled or isinstance(self.compiled.statement, expression._TextClause)) and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement)) ) ) else: is_server_side = self.execution_options.get('stream_results', False) self.__is_server_side = is_server_side if is_server_side: # use server-side cursors: # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:]) return self._dbapi_connection.cursor(ident) else: return self._dbapi_connection.cursor() def get_result_proxy(self): # TODO: ouch if logger.isEnabledFor(logging.INFO): self._log_notices(self.cursor) if self.__is_server_side: return base.BufferedRowResultProxy(self) else: return base.ResultProxy(self) def _log_notices(self, cursor): for notice in cursor.connection.notices: # NOTICE messages have a # newline character at the end logger.info(notice.rstrip()) cursor.connection.notices[:] = [] class PGCompiler_psycopg2(PGCompiler): def visit_mod(self, binary, **kw): return self.process(binary.left) + " %% " + self.process(binary.right) def post_process_text(self, text): return text.replace('%', '%%') class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace('%', '%%') class PGDialect_psycopg2(PGDialect): driver = 'psycopg2' # Py2K supports_unicode_statements = False # end Py2K default_paramstyle = 'pyformat' supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_psycopg2 statement_compiler = PGCompiler_psycopg2 preparer = PGIdentifierPreparer_psycopg2 psycopg2_version = (0, 0) colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric : _PGNumeric, ENUM : _PGEnum, # needs force_unicode sqltypes.Enum : _PGEnum, # needs force_unicode ARRAY : _PGArray, # needs force_unicode } ) def __init__(self, server_side_cursors=False, use_native_unicode=True, client_encoding=None, **kwargs): PGDialect.__init__(self, **kwargs) self.server_side_cursors = server_side_cursors self.use_native_unicode = use_native_unicode self.supports_unicode_binds = use_native_unicode self.client_encoding = client_encoding if self.dbapi and hasattr(self.dbapi, '__version__'): m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', self.dbapi.__version__) if m: self.psycopg2_version = tuple( int(x) for x in m.group(1, 2, 3) if x is not None) @classmethod def dbapi(cls): psycopg = __import__('psycopg2') return psycopg @util.memoized_property def _isolation_lookup(self): extensions = __import__('psycopg2.extensions').extensions return { 'READ COMMITTED':extensions.ISOLATION_LEVEL_READ_COMMITTED, 'READ UNCOMMITTED':extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, 'REPEATABLE READ':extensions.ISOLATION_LEVEL_REPEATABLE_READ, 'SERIALIZABLE':extensions.ISOLATION_LEVEL_SERIALIZABLE } def set_isolation_level(self, connection, level): try: level = self._isolation_lookup[level.replace('_', ' ')] except KeyError: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. 
" "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) connection.set_isolation_level(level) def on_connect(self): fns = [] if self.client_encoding is not None: def on_connect(conn): conn.set_client_encoding(self.client_encoding) fns.append(on_connect) if self.isolation_level is not None: def on_connect(conn): self.set_isolation_level(conn, self.isolation_level) fns.append(on_connect) if self.dbapi and self.use_native_unicode: extensions = __import__('psycopg2.extensions').extensions def on_connect(conn): extensions.register_type(extensions.UNICODE, conn) fns.append(on_connect) if fns: def on_connect(conn): for fn in fns: fn(conn) return on_connect else: return None def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: opts['port'] = int(opts['port']) opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.OperationalError): # these error messages from libpq: interfaces/libpq/fe-misc.c. # TODO: these are sent through gettext in libpq and we can't # check within other locales - consider using connection.closed return 'terminating connection' in str(e) or \ 'closed the connection' in str(e) or \ 'connection not open' in str(e) or \ 'could not receive data from server' in str(e) elif isinstance(e, self.dbapi.InterfaceError): # psycopg2 client errors, psycopg2/conenction.h, psycopg2/cursor.h return 'connection already closed' in str(e) or \ 'cursor already closed' in str(e) elif isinstance(e, self.dbapi.ProgrammingError): # not sure where this path is originally from, it may # be obsolete. It really says "losed", not "closed". return "losed the connection unexpectedly" in str(e) else: return False dialect = PGDialect_psycopg2
bsd-3-clause
ProfessorX/Config
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KPixmapSequence.py
1
1041
# encoding: utf-8 # module PyKDE4.kdeui # from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so # by generator 1.135 # no doc # imports import PyKDE4.kdecore as __PyKDE4_kdecore import PyQt4.QtCore as __PyQt4_QtCore import PyQt4.QtGui as __PyQt4_QtGui import PyQt4.QtSvg as __PyQt4_QtSvg class KPixmapSequence(): # skipped bases: <class 'sip.wrapper'> # no doc def frameAt(self, *args, **kwargs): # real signature unknown pass def frameCount(self, *args, **kwargs): # real signature unknown pass def frameSize(self, *args, **kwargs): # real signature unknown pass def isEmpty(self, *args, **kwargs): # real signature unknown pass def isValid(self, *args, **kwargs): # real signature unknown pass def __init__(self, *args, **kwargs): # real signature unknown pass __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """list of weak references to the object (if defined)"""
gpl-2.0
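Since the stub above records only signatures, here is a hedged usage sketch against the real class, assuming a working PyKDE4 install; the icon name is an illustrative theme icon, not something the stub guarantees:

from PyKDE4.kdeui import KPixmapSequence

seq = KPixmapSequence("process-working", 22)  # 22px animation frames
if seq.isValid() and not seq.isEmpty():
    # Walk the individual frames; frameAt() returns a QPixmap per frame.
    for i in range(seq.frameCount()):
        print(i, seq.frameAt(i).size())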
vimagick/youtube-dl
youtube_dl/extractor/channel9.py
124
11345
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class Channel9IE(InfoExtractor): ''' Common extractor for channel9.msdn.com. The type of provided URL (video or playlist) is determined according to meta Search.PageType from web page HTML rather than URL itself, as it is not always possible to do. ''' IE_DESC = 'Channel 9' IE_NAME = 'channel9' _VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?' _TESTS = [ { 'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002', 'md5': 'bbd75296ba47916b754e73c3a4bbdf10', 'info_dict': { 'id': 'Events/TechEd/Australia/2013/KOS002', 'ext': 'mp4', 'title': 'Developer Kick-Off Session: Stuff We Love', 'description': 'md5:c08d72240b7c87fcecafe2692f80e35f', 'duration': 4576, 'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg', 'session_code': 'KOS002', 'session_day': 'Day 1', 'session_room': 'Arena 1A', 'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'], }, }, { 'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing', 'md5': 'b43ee4529d111bc37ba7ee4f34813e68', 'info_dict': { 'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing', 'ext': 'mp4', 'title': 'Self-service BI with Power BI - nuclear testing', 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b', 'duration': 1540, 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg', 'authors': ['Mike Wilmot'], }, } ] _RSS_URL = 'http://channel9.msdn.com/%s/RSS' # Sorted by quality _known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4'] def _restore_bytes(self, formatted_size): if not formatted_size: return 0 m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size) if not m: return 0 units = m.group('units') try: exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper()) except ValueError: return 0 size = float(m.group('size')) return int(size * (1024 ** exponent)) def _formats_from_html(self, html): FORMAT_REGEX = r''' (?x) <a\s+href="(?P<url>[^"]+)">(?P<quality>[^<]+)</a>\s* <span\s+class="usage">\((?P<note>[^\)]+)\)</span>\s* (?:<div\s+class="popup\s+rounded">\s* <h3>File\s+size</h3>\s*(?P<filesize>.*?)\s* </div>)? 
# File size part may be missing ''' # Extract known formats formats = [{ 'url': x.group('url'), 'format_id': x.group('quality'), 'format_note': x.group('note'), 'format': '%s (%s)' % (x.group('quality'), x.group('note')), 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate 'preference': self._known_formats.index(x.group('quality')), 'vcodec': 'none' if x.group('note') == 'Audio only' else None, } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] self._sort_formats(formats) return formats def _extract_title(self, html): title = self._html_search_meta('title', html, 'title') if title is None: title = self._og_search_title(html) TITLE_SUFFIX = ' (Channel 9)' if title is not None and title.endswith(TITLE_SUFFIX): title = title[:-len(TITLE_SUFFIX)] return title def _extract_description(self, html): DESCRIPTION_REGEX = r'''(?sx) <div\s+class="entry-content">\s* <div\s+id="entry-body">\s* (?P<description>.+?)\s* </div>\s* </div> ''' m = re.search(DESCRIPTION_REGEX, html) if m is not None: return m.group('description') return self._html_search_meta('description', html, 'description') def _extract_duration(self, html): m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html) return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None def _extract_slides(self, html): m = re.search(r'<a href="(?P<slidesurl>[^"]+)" class="slides">Slides</a>', html) return m.group('slidesurl') if m is not None else None def _extract_zip(self, html): m = re.search(r'<a href="(?P<zipurl>[^"]+)" class="zip">Zip</a>', html) return m.group('zipurl') if m is not None else None def _extract_avg_rating(self, html): m = re.search(r'<p class="avg-rating">Avg Rating: <span>(?P<avgrating>[^<]+)</span></p>', html) return float(m.group('avgrating')) if m is not None else 0 def _extract_rating_count(self, html): m = re.search(r'<div class="rating-count">\((?P<ratingcount>[^<]+)\)</div>', html) return int(self._fix_count(m.group('ratingcount'))) if m is not None else 0 def _extract_view_count(self, html): m = re.search(r'<li class="views">\s*<span class="count">(?P<viewcount>[^<]+)</span> Views\s*</li>', html) return int(self._fix_count(m.group('viewcount'))) if m is not None else 0 def _extract_comment_count(self, html): m = re.search(r'<li class="comments">\s*<a href="#comments">\s*<span class="count">(?P<commentcount>[^<]+)</span> Comments\s*</a>\s*</li>', html) return int(self._fix_count(m.group('commentcount'))) if m is not None else 0 def _fix_count(self, count): return int(str(count).replace(',', '')) if count is not None else None def _extract_authors(self, html): m = re.search(r'(?s)<li class="author">(.*?)</li>', html) if m is None: return None return re.findall(r'<a href="/Niners/[^"]+">([^<]+)</a>', m.group(1)) def _extract_session_code(self, html): m = re.search(r'<li class="code">\s*(?P<code>.+?)\s*</li>', html) return m.group('code') if m is not None else None def _extract_session_day(self, html): m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html) return m.group('day') if m is not None else None def _extract_session_room(self, html): m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html) return m.group('room') if m is not None else None def _extract_session_speakers(self, html): return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html) def _extract_content(self, html, content_path): # Look 
for downloadable content formats = self._formats_from_html(html) slides = self._extract_slides(html) zip_ = self._extract_zip(html) # Nothing to download if len(formats) == 0 and slides is None and zip_ is None: self._downloader.report_warning('None of recording, slides or zip are available for %s' % content_path) return # Extract meta title = self._extract_title(html) description = self._extract_description(html) thumbnail = self._og_search_thumbnail(html) duration = self._extract_duration(html) avg_rating = self._extract_avg_rating(html) rating_count = self._extract_rating_count(html) view_count = self._extract_view_count(html) comment_count = self._extract_comment_count(html) common = { '_type': 'video', 'id': content_path, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'avg_rating': avg_rating, 'rating_count': rating_count, 'view_count': view_count, 'comment_count': comment_count, } result = [] if slides is not None: d = common.copy() d.update({'title': title + '-Slides', 'url': slides}) result.append(d) if zip_ is not None: d = common.copy() d.update({'title': title + '-Zip', 'url': zip_}) result.append(d) if len(formats) > 0: d = common.copy() d.update({'title': title, 'formats': formats}) result.append(d) return result def _extract_entry_item(self, html, content_path): contents = self._extract_content(html, content_path) if contents is None: return contents authors = self._extract_authors(html) for content in contents: content['authors'] = authors return contents def _extract_session(self, html, content_path): contents = self._extract_content(html, content_path) if contents is None: return contents session_meta = { 'session_code': self._extract_session_code(html), 'session_day': self._extract_session_day(html), 'session_room': self._extract_session_room(html), 'session_speakers': self._extract_session_speakers(html), } for content in contents: content.update(session_meta) return self.playlist_result(contents) def _extract_list(self, content_path): rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS') entries = [self.url_result(session_url.text, 'Channel9') for session_url in rss.findall('./channel/item/link')] title_text = rss.find('./channel/title').text return self.playlist_result(entries, content_path, title_text) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) content_path = mobj.group('contentpath') webpage = self._download_webpage(url, content_path, 'Downloading web page') page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage) if page_type_m is not None: page_type = page_type_m.group('pagetype') if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content return self._extract_entry_item(webpage, content_path) elif page_type == 'Session': # Event session page, may contain downloadable content return self._extract_session(webpage, content_path) elif page_type == 'Event': return self._extract_list(content_path) else: raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True) else: # Assuming list return self._extract_list(content_path)
unlicense
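A short sketch of driving the extractor above through youtube-dl's public API, reusing the first URL from the extractor's own _TESTS list; download=False limits the call to metadata extraction:

import youtube_dl

with youtube_dl.YoutubeDL() as ydl:
    # Extraction routes through Channel9IE._real_extract for this URL.
    info = ydl.extract_info(
        "http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002",
        download=False)
    print(info.get("title"), info.get("duration"))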
lizoyu/kaggle-DigitRecognizer
jupyter/resume_train.py
1
1272
from lib.data_utils import get_MNIST_data
from keras.models import load_model
from keras.backend import tf as ktf
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, EarlyStopping

# Read the MNIST data. Note that we assume it lives at
# 'kaggle-DigitRecognizer/data/train.csv' and use a helper function to read
# it into a dictionary. By default this yields 41000 training samples,
# 1000 test samples and 1000 validation samples (taken from the training set).
data = get_MNIST_data(fit=True)

# load the model (checkpoint)
tunemodel = load_model('../models/tuneResNet_early_04-0.0146.h5',
                       custom_objects={'ktf': ktf})

# set the loss and optimizer
rmsprop = RMSprop(lr=0.0001)
tunemodel.compile(optimizer=rmsprop,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

# fit the model
checkpoint = ModelCheckpoint('../models/tuneResNet_early_{epoch:02d}-{loss:.4f}.h5',
                             monitor='loss', save_best_only=False)
earlystop = EarlyStopping(min_delta=0.001, patience=1)
tunemodel.fit(data['X_train'], data['y_train'].reshape(-1, 1),
              batch_size=16, epochs=10,
              validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
              callbacks=[checkpoint, earlystop],
              initial_epoch=5)
gpl-3.0
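The script above resumes from a named checkpoint by passing initial_epoch; the same pattern on a self-contained toy model (every name here is illustrative, not taken from the project):

import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense
from keras.models import Sequential

x = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))

model = Sequential([Dense(4, activation="relu", input_shape=(8,)),
                    Dense(1, activation="sigmoid")])
model.compile(optimizer="rmsprop", loss="binary_crossentropy")

# The {epoch}/{loss} placeholders yield names like 'toy_04-0.6931.h5',
# the same scheme the script above reloads from.
ckpt = ModelCheckpoint("toy_{epoch:02d}-{loss:.4f}.h5", monitor="loss")
model.fit(x, y, epochs=5, callbacks=[ckpt])

# Resume: keep the epoch counter going so checkpoint names stay consistent.
model.fit(x, y, epochs=10, initial_epoch=5, callbacks=[ckpt])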
GoogleCloudPlatform/ml-pipeline-generator-python
examples/kfp/demo.py
1
2531
# python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo for KubeFlow Pipelines."""
import json
import os

from ml_pipeline_gen.models import TFModel
from ml_pipeline_gen.pipelines import KfpPipeline
from model.census_preprocess import load_data


def _upload_data_to_gcs(model):
    """Calls the preprocessing fn which uploads train/eval data to GCS."""
    load_data(model.data["train"], model.data["evaluation"])


# TODO(humichael): See if there's a way to support csv batch predicts.
def _upload_input_data_to_gcs(model, data):
    input_path = "tf_input_data.json"
    with open(input_path, "w+") as f:
        for features in data:
            f.write(json.dumps(features) + "\n")
    model.upload_pred_input_data(input_path)
    os.remove(input_path)


# pylint: disable=g-import-not-at-top
def main():
    config = "config.yaml"
    model = TFModel(config)
    model.generate_files()
    _upload_data_to_gcs(model)
    pipeline = KfpPipeline(model)

    # preprocess and upload dataset to expected location.
    load_data(model.data["train"], model.data["evaluation"])

    # define pipeline structure
    p = pipeline.add_train_component()
    pipeline.add_deploy_component(parent=p)
    pipeline.add_predict_component(parent=p)
    pipeline.print_structure()
    pipeline.generate_pipeline()

    # Create batch prediction data in GCS.
    pred_input = [{
        "age": 0.02599666,
        "workclass": 6,
        "education_num": 1.1365801,
        "marital_status": 4,
        "occupation": 0,
        "relationship": 1,
        "race": 4,
        "capital_gain": 0.14693314,
        "capital_loss": -0.21713187,
        "hours_per_week": -0.034039237,
        "native_country": 38,
        "income_bracket": 0,
    }]
    _upload_input_data_to_gcs(model, pred_input)

    # Run the pipeline.
    # pylint: disable=import-outside-toplevel
    from orchestration import pipeline as kfp_pipeline
    kfp_pipeline.main()


if __name__ == "__main__":
    main()
apache-2.0
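A self-contained sketch of the JSON-lines format that _upload_input_data_to_gcs writes before uploading — one feature dict per line (the record here is abbreviated):

import json

pred_input = [{"age": 0.026, "workclass": 6}]  # abbreviated example record
with open("tf_input_data.json", "w") as f:
    for features in pred_input:
        f.write(json.dumps(features) + "\n")
# In the real script this file is pushed to GCS via
# model.upload_pred_input_data(...) and then removed locally.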
ernstp/kivy
kivy/tests/test_lang.py
26
5715
''' Language tests ============== ''' import unittest from weakref import proxy from functools import partial class BaseClass(object): uid = 0 # base class needed for builder def __init__(self, **kwargs): super(BaseClass, self).__init__() self.proxy_ref = proxy(self) self.children = [] self.parent = None self.binded_func = {} self.id = None self.ids = {} self.cls = [] self.ids = {} self.uid = BaseClass.uid BaseClass.uid += 1 def add_widget(self, widget): self.children.append(widget) widget.parent = self def create_property(self, name, value=None): pass def is_event_type(self, key): return key.startswith('on_') def fbind(self, name, func, *largs): self.binded_func[name] = partial(func, *largs) return True class TestClass(BaseClass): obj = None class TestClass2(BaseClass): obj = None class TestClass3(BaseClass): obj = None class LangTestCase(unittest.TestCase): def import_builder(self): from kivy.factory import Factory from kivy.lang import BuilderBase Builder = BuilderBase() Factory.register('TestClass', cls=TestClass) Factory.register('TestClass2', cls=TestClass2) Factory.register('TestClass3', cls=TestClass3) return Builder def test_loading_failed_1(self): # invalid indent Builder = self.import_builder() from kivy.lang import ParserException try: Builder.load_string('''#:kivy 1.0 <TestClass>: ''') self.fail('Invalid indentation.') except ParserException: pass def test_parser_numeric_1(self): Builder = self.import_builder() Builder.load_string('<TestClass>:\n\tobj: (.5, .5, .5)') wid = TestClass() Builder.apply(wid) self.assertEqual(wid.obj, (0.5, 0.5, 0.5)) def test_parser_numeric_2(self): Builder = self.import_builder() Builder.load_string('<TestClass>:\n\tobj: (0.5, 0.5, 0.5)') wid = TestClass() Builder.apply(wid) self.assertEqual(wid.obj, (0.5, 0.5, 0.5)) def test_references(self): Builder = self.import_builder() Builder.load_string(''' <TestClass>: textinput: textinput TestClass2: id: textinput ''') wid = TestClass() Builder.apply(wid) self.assertTrue(hasattr(wid, 'textinput')) self.assertTrue(getattr(wid, 'textinput') is not None) def test_references_with_template(self): Builder = self.import_builder() Builder.load_string(''' [Item@TestClass3]: title: ctx.title <TestClass>: textinput: textinput Item: title: 'bleh' TestClass2: id: textinput ''') wid = TestClass() Builder.apply(wid) self.assertTrue(hasattr(wid, 'textinput')) self.assertTrue(getattr(wid, 'textinput') is not None) def test_references_with_template_case_2(self): Builder = self.import_builder() Builder.load_string(''' [Item@TestClass3]: title: ctx.title <TestClass>: textinput: textinput TestClass2: id: textinput Item: title: 'bleh' ''') wid = TestClass() Builder.apply(wid) self.assertTrue(hasattr(wid, 'textinput')) self.assertTrue(getattr(wid, 'textinput') is not None) def test_references_with_template_case_3(self): Builder = self.import_builder() Builder.load_string(''' [Item@TestClass3]: title: ctx.title <TestClass>: textinput: textinput TestClass2: Item: title: 'bleh' TestClass2: TestClass2: id: textinput ''') wid = TestClass() Builder.apply(wid) self.assertTrue(hasattr(wid, 'textinput')) self.assertTrue(getattr(wid, 'textinput') is not None) def test_with_multiline(self): Builder = self.import_builder() Builder.load_string(''' <TestClass>: on_press: print('hello world') print('this is working !') self.a = 1 ''') wid = TestClass() Builder.apply(wid) wid.a = 0 self.assertTrue('on_press' in wid.binded_func) wid.binded_func['on_press']() self.assertEquals(wid.a, 1) def test_with_eight_spaces(self): Builder = 
self.import_builder() Builder.load_string(''' <TestClass>: on_press: print('hello world') print('this is working !') self.a = 1 ''') wid = TestClass() Builder.apply(wid) wid.a = 0 self.assertTrue('on_press' in wid.binded_func) wid.binded_func['on_press']() self.assertEquals(wid.a, 1) def test_with_one_space(self): Builder = self.import_builder() Builder.load_string(''' <TestClass>: on_press: print('hello world') print('this is working !') self.a = 1 ''') wid = TestClass() Builder.apply(wid) wid.a = 0 self.assertTrue('on_press' in wid.binded_func) wid.binded_func['on_press']() self.assertEquals(wid.a, 1) def test_with_two_spaces(self): Builder = self.import_builder() Builder.load_string(''' <TestClass>: on_press: print('hello world') print('this is working !') self.a = 1 ''') wid = TestClass() Builder.apply(wid) wid.a = 0 self.assertTrue('on_press' in wid.binded_func) wid.binded_func['on_press']() self.assertEquals(wid.a, 1)
mit
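The tests above exercise a private BuilderBase instance; a hedged sketch of the same invalid-indentation case against Kivy's public Builder (requires a Kivy install, and importing kivy has side effects such as logging setup):

from kivy.lang import Builder, ParserException

try:
    Builder.load_string('''#:kivy 1.0
 <MyRule>:
''')
except ParserException as e:
    # Mirrors test_loading_failed_1: a stray one-space indent is rejected.
    print("rejected:", e)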
tempbottle/kbengine
kbe/src/lib/python/Lib/distutils/spawn.py
81
7514
"""distutils.spawn Provides the 'spawn()' function, a front-end to various platform- specific functions for launching another program in a sub-process. Also provides the 'find_executable()' to search the path for a given executable name. """ import sys import os from distutils.errors import DistutilsPlatformError, DistutilsExecError from distutils.debug import DEBUG from distutils import log def spawn(cmd, search_path=1, verbose=0, dry_run=0): """Run another program, specified as a command list 'cmd', in a new process. 'cmd' is just the argument list for the new process, ie. cmd[0] is the program to run and cmd[1:] are the rest of its arguments. There is no way to run a program with a name different from that of its executable. If 'search_path' is true (the default), the system's executable search path will be used to find the program; otherwise, cmd[0] must be the exact path to the executable. If 'dry_run' is true, the command will not actually be run. Raise DistutilsExecError if running the program fails in any way; just return on success. """ # cmd is documented as a list, but just in case some code passes a tuple # in, protect our %-formatting code against horrible death cmd = list(cmd) if os.name == 'posix': _spawn_posix(cmd, search_path, dry_run=dry_run) elif os.name == 'nt': _spawn_nt(cmd, search_path, dry_run=dry_run) else: raise DistutilsPlatformError( "don't know how to spawn programs on platform '%s'" % os.name) def _nt_quote_args(args): """Quote command-line arguments for DOS/Windows conventions. Just wraps every argument which contains blanks in double quotes, and returns a new argument list. """ # XXX this doesn't seem very robust to me -- but if the Windows guys # say it'll work, I guess I'll have to accept it. (What if an arg # contains quotes? What other magic characters, other than spaces, # have to be escaped? Is there an escaping mechanism other than # quoting?) for i, arg in enumerate(args): if ' ' in arg: args[i] = '"%s"' % arg return args def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0): executable = cmd[0] cmd = _nt_quote_args(cmd) if search_path: # either we find one or it stays the same executable = find_executable(executable) or executable log.info(' '.join([executable] + cmd[1:])) if not dry_run: # spawn for NT requires a full path to the .exe try: rc = os.spawnv(os.P_WAIT, executable, cmd) except OSError as exc: # this seems to happen when the command isn't found if not DEBUG: cmd = executable raise DistutilsExecError( "command %r failed: %s" % (cmd, exc.args[-1])) if rc != 0: # and this reflects the command running but failing if not DEBUG: cmd = executable raise DistutilsExecError( "command %r failed with exit status %d" % (cmd, rc)) if sys.platform == 'darwin': from distutils import sysconfig _cfg_target = None _cfg_target_split = None def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0): log.info(' '.join(cmd)) if dry_run: return executable = cmd[0] exec_fn = search_path and os.execvp or os.execv env = None if sys.platform == 'darwin': global _cfg_target, _cfg_target_split if _cfg_target is None: _cfg_target = sysconfig.get_config_var( 'MACOSX_DEPLOYMENT_TARGET') or '' if _cfg_target: _cfg_target_split = [int(x) for x in _cfg_target.split('.')] if _cfg_target: # ensure that the deployment target of build process is not less # than that used when the interpreter was built. 
This ensures # extension modules are built with correct compatibility values cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target) if _cfg_target_split > [int(x) for x in cur_target.split('.')]: my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: ' 'now "%s" but "%s" during configure' % (cur_target, _cfg_target)) raise DistutilsPlatformError(my_msg) env = dict(os.environ, MACOSX_DEPLOYMENT_TARGET=cur_target) exec_fn = search_path and os.execvpe or os.execve pid = os.fork() if pid == 0: # in the child try: if env is None: exec_fn(executable, cmd) else: exec_fn(executable, cmd, env) except OSError as e: if not DEBUG: cmd = executable sys.stderr.write("unable to execute %r: %s\n" % (cmd, e.strerror)) os._exit(1) if not DEBUG: cmd = executable sys.stderr.write("unable to execute %r for unknown reasons" % cmd) os._exit(1) else: # in the parent # Loop until the child either exits or is terminated by a signal # (ie. keep waiting if it's merely stopped) while True: try: pid, status = os.waitpid(pid, 0) except OSError as exc: import errno if exc.errno == errno.EINTR: continue if not DEBUG: cmd = executable raise DistutilsExecError( "command %r failed: %s" % (cmd, exc.args[-1])) if os.WIFSIGNALED(status): if not DEBUG: cmd = executable raise DistutilsExecError( "command %r terminated by signal %d" % (cmd, os.WTERMSIG(status))) elif os.WIFEXITED(status): exit_status = os.WEXITSTATUS(status) if exit_status == 0: return # hey, it succeeded! else: if not DEBUG: cmd = executable raise DistutilsExecError( "command %r failed with exit status %d" % (cmd, exit_status)) elif os.WIFSTOPPED(status): continue else: if not DEBUG: cmd = executable raise DistutilsExecError( "unknown error executing %r: termination status %d" % (cmd, status)) def find_executable(executable, path=None): """Tries to find 'executable' in the directories listed in 'path'. A string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']. Returns the complete filename or None if not found. """ if path is None: path = os.environ['PATH'] paths = path.split(os.pathsep) base, ext = os.path.splitext(executable) if (sys.platform == 'win32') and (ext != '.exe'): executable = executable + '.exe' if not os.path.isfile(executable): for p in paths: f = os.path.join(p, executable) if os.path.isfile(f): # the file exists, we have a shot at spawn working return f return None else: return executable
lgpl-3.0
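A small usage sketch for the module above: locate an executable on the PATH and run it in a subprocess, with dry_run only logging the command (POSIX example):

from distutils.spawn import find_executable, spawn

ls = find_executable("ls")        # absolute path, or None if not on PATH
if ls:
    spawn([ls, "-l"])             # raises DistutilsExecError on failure
    spawn([ls, "-l"], dry_run=1)  # log the command without executing it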
benfinke/ns_python
nssrc/com/citrix/netscaler/nitro/resource/config/tm/tmsamlssoprofile.py
3
70968
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class tmsamlssoprofile(base_resource) : """ Configuration for SAML sso action resource. """ def __init__(self) : self._name = "" self._samlsigningcertname = "" self._assertionconsumerserviceurl = "" self._relaystaterule = "" self._sendpassword = "" self._samlissuername = "" self._signaturealg = "" self._digestmethod = "" self._audience = "" self._nameidformat = "" self._nameidexpr = "" self._attribute1 = "" self._attribute1expr = "" self._attribute1friendlyname = "" self._attribute1format = "" self._attribute2 = "" self._attribute2expr = "" self._attribute2friendlyname = "" self._attribute2format = "" self._attribute3 = "" self._attribute3expr = "" self._attribute3friendlyname = "" self._attribute3format = "" self._attribute4 = "" self._attribute4expr = "" self._attribute4friendlyname = "" self._attribute4format = "" self._attribute5 = "" self._attribute5expr = "" self._attribute5friendlyname = "" self._attribute5format = "" self._attribute6 = "" self._attribute6expr = "" self._attribute6friendlyname = "" self._attribute6format = "" self._attribute7 = "" self._attribute7expr = "" self._attribute7friendlyname = "" self._attribute7format = "" self._attribute8 = "" self._attribute8expr = "" self._attribute8friendlyname = "" self._attribute8format = "" self._attribute9 = "" self._attribute9expr = "" self._attribute9friendlyname = "" self._attribute9format = "" self._attribute10 = "" self._attribute10expr = "" self._attribute10friendlyname = "" self._attribute10format = "" self._attribute11 = "" self._attribute11expr = "" self._attribute11friendlyname = "" self._attribute11format = "" self._attribute12 = "" self._attribute12expr = "" self._attribute12friendlyname = "" self._attribute12format = "" self._attribute13 = "" self._attribute13expr = "" self._attribute13friendlyname = "" self._attribute13format = "" self._attribute14 = "" self._attribute14expr = "" self._attribute14friendlyname = "" self._attribute14format = "" self._attribute15 = "" self._attribute15expr = "" self._attribute15friendlyname = "" self._attribute15format = "" self._attribute16 = "" self._attribute16expr = "" self._attribute16friendlyname = "" self._attribute16format = "" self.___count = 0 @property def name(self) : ur"""Name for the new saml single sign-on profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after an SSO action is created. 
The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my action" or 'my action').<br/>Minimum length = 1. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : ur"""Name for the new saml single sign-on profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after an SSO action is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my action" or 'my action').<br/>Minimum length = 1 """ try : self._name = name except Exception as e: raise e @property def samlsigningcertname(self) : ur"""Name of the signing authority as given in the SAML server's SSL certificate.<br/>Minimum length = 1. """ try : return self._samlsigningcertname except Exception as e: raise e @samlsigningcertname.setter def samlsigningcertname(self, samlsigningcertname) : ur"""Name of the signing authority as given in the SAML server's SSL certificate.<br/>Minimum length = 1 """ try : self._samlsigningcertname = samlsigningcertname except Exception as e: raise e @property def assertionconsumerserviceurl(self) : ur"""URL to which the assertion is to be sent.<br/>Minimum length = 1. """ try : return self._assertionconsumerserviceurl except Exception as e: raise e @assertionconsumerserviceurl.setter def assertionconsumerserviceurl(self, assertionconsumerserviceurl) : ur"""URL to which the assertion is to be sent.<br/>Minimum length = 1 """ try : self._assertionconsumerserviceurl = assertionconsumerserviceurl except Exception as e: raise e @property def relaystaterule(self) : ur"""Expression to extract relaystate to be sent along with assertion. Evaluation of this expression should return TEXT content. This is typically a targ et url to which user is redirected after the recipient validates SAML token. """ try : return self._relaystaterule except Exception as e: raise e @relaystaterule.setter def relaystaterule(self, relaystaterule) : ur"""Expression to extract relaystate to be sent along with assertion. Evaluation of this expression should return TEXT content. This is typically a targ et url to which user is redirected after the recipient validates SAML token. """ try : self._relaystaterule = relaystaterule except Exception as e: raise e @property def sendpassword(self) : ur"""Option to send password in assertion.<br/>Default value: OFF<br/>Possible values = ON, OFF. """ try : return self._sendpassword except Exception as e: raise e @sendpassword.setter def sendpassword(self, sendpassword) : ur"""Option to send password in assertion.<br/>Default value: OFF<br/>Possible values = ON, OFF """ try : self._sendpassword = sendpassword except Exception as e: raise e @property def samlissuername(self) : ur"""The name to be used in requests sent from Netscaler to IdP to uniquely identify Netscaler.<br/>Minimum length = 1. 
""" try : return self._samlissuername except Exception as e: raise e @samlissuername.setter def samlissuername(self, samlissuername) : ur"""The name to be used in requests sent from Netscaler to IdP to uniquely identify Netscaler.<br/>Minimum length = 1 """ try : self._samlissuername = samlissuername except Exception as e: raise e @property def signaturealg(self) : ur"""Algorithm to be used to sign/verify SAML transactions.<br/>Default value: RSA-SHA1<br/>Possible values = RSA-SHA1, RSA-SHA256. """ try : return self._signaturealg except Exception as e: raise e @signaturealg.setter def signaturealg(self, signaturealg) : ur"""Algorithm to be used to sign/verify SAML transactions.<br/>Default value: RSA-SHA1<br/>Possible values = RSA-SHA1, RSA-SHA256 """ try : self._signaturealg = signaturealg except Exception as e: raise e @property def digestmethod(self) : ur"""Algorithm to be used to compute/verify digest for SAML transactions.<br/>Default value: SHA1<br/>Possible values = SHA1, SHA256. """ try : return self._digestmethod except Exception as e: raise e @digestmethod.setter def digestmethod(self, digestmethod) : ur"""Algorithm to be used to compute/verify digest for SAML transactions.<br/>Default value: SHA1<br/>Possible values = SHA1, SHA256 """ try : self._digestmethod = digestmethod except Exception as e: raise e @property def audience(self) : ur"""Audience for which assertion sent by IdP is applicable. This is typically entity name or url that represents ServiceProvider.<br/>Maximum length = 256. """ try : return self._audience except Exception as e: raise e @audience.setter def audience(self, audience) : ur"""Audience for which assertion sent by IdP is applicable. This is typically entity name or url that represents ServiceProvider.<br/>Maximum length = 256 """ try : self._audience = audience except Exception as e: raise e @property def nameidformat(self) : ur"""Format of Name Identifier sent in Assertion.<br/>Default value: transient<br/>Possible values = Unspecified, emailAddress, X509SubjectName, WindowsDomainQualifiedName, kerberos, entity, persistent, transient. """ try : return self._nameidformat except Exception as e: raise e @nameidformat.setter def nameidformat(self, nameidformat) : ur"""Format of Name Identifier sent in Assertion.<br/>Default value: transient<br/>Possible values = Unspecified, emailAddress, X509SubjectName, WindowsDomainQualifiedName, kerberos, entity, persistent, transient """ try : self._nameidformat = nameidformat except Exception as e: raise e @property def nameidexpr(self) : ur"""Expression that will be evaluated to obtain NameIdentifier to be sent in assertion.<br/>Maximum length = 128. """ try : return self._nameidexpr except Exception as e: raise e @nameidexpr.setter def nameidexpr(self, nameidexpr) : ur"""Expression that will be evaluated to obtain NameIdentifier to be sent in assertion.<br/>Maximum length = 128 """ try : self._nameidexpr = nameidexpr except Exception as e: raise e @property def attribute1(self) : ur"""Name of attribute1 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute1 except Exception as e: raise e @attribute1.setter def attribute1(self, attribute1) : ur"""Name of attribute1 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute1 = attribute1 except Exception as e: raise e @property def attribute1expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. 
""" try : return self._attribute1expr except Exception as e: raise e @attribute1expr.setter def attribute1expr(self, attribute1expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute1expr = attribute1expr except Exception as e: raise e @property def attribute1friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute1friendlyname except Exception as e: raise e @attribute1friendlyname.setter def attribute1friendlyname(self, attribute1friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute1friendlyname = attribute1friendlyname except Exception as e: raise e @property def attribute1format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute1format except Exception as e: raise e @attribute1format.setter def attribute1format(self, attribute1format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute1format = attribute1format except Exception as e: raise e @property def attribute2(self) : ur"""Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute2 except Exception as e: raise e @attribute2.setter def attribute2(self, attribute2) : ur"""Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute2 = attribute2 except Exception as e: raise e @property def attribute2expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute2expr except Exception as e: raise e @attribute2expr.setter def attribute2expr(self, attribute2expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute2expr = attribute2expr except Exception as e: raise e @property def attribute2friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute2friendlyname except Exception as e: raise e @attribute2friendlyname.setter def attribute2friendlyname(self, attribute2friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute2friendlyname = attribute2friendlyname except Exception as e: raise e @property def attribute2format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute2format except Exception as e: raise e @attribute2format.setter def attribute2format(self, attribute2format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute2format = attribute2format except Exception as e: raise e @property def attribute3(self) : ur"""Name of attribute3 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. 
""" try : return self._attribute3 except Exception as e: raise e @attribute3.setter def attribute3(self, attribute3) : ur"""Name of attribute3 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute3 = attribute3 except Exception as e: raise e @property def attribute3expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute3expr except Exception as e: raise e @attribute3expr.setter def attribute3expr(self, attribute3expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute3expr = attribute3expr except Exception as e: raise e @property def attribute3friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute3friendlyname except Exception as e: raise e @attribute3friendlyname.setter def attribute3friendlyname(self, attribute3friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute3friendlyname = attribute3friendlyname except Exception as e: raise e @property def attribute3format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute3format except Exception as e: raise e @attribute3format.setter def attribute3format(self, attribute3format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute3format = attribute3format except Exception as e: raise e @property def attribute4(self) : ur"""Name of attribute4 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute4 except Exception as e: raise e @attribute4.setter def attribute4(self, attribute4) : ur"""Name of attribute4 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute4 = attribute4 except Exception as e: raise e @property def attribute4expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute4expr except Exception as e: raise e @attribute4expr.setter def attribute4expr(self, attribute4expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute4expr = attribute4expr except Exception as e: raise e @property def attribute4friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute4friendlyname except Exception as e: raise e @attribute4friendlyname.setter def attribute4friendlyname(self, attribute4friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute4friendlyname = attribute4friendlyname except Exception as e: raise e @property def attribute4format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. 
""" try : return self._attribute4format except Exception as e: raise e @attribute4format.setter def attribute4format(self, attribute4format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute4format = attribute4format except Exception as e: raise e @property def attribute5(self) : ur"""Name of attribute5 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute5 except Exception as e: raise e @attribute5.setter def attribute5(self, attribute5) : ur"""Name of attribute5 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute5 = attribute5 except Exception as e: raise e @property def attribute5expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute5expr except Exception as e: raise e @attribute5expr.setter def attribute5expr(self, attribute5expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute5expr = attribute5expr except Exception as e: raise e @property def attribute5friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute5friendlyname except Exception as e: raise e @attribute5friendlyname.setter def attribute5friendlyname(self, attribute5friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute5friendlyname = attribute5friendlyname except Exception as e: raise e @property def attribute5format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute5format except Exception as e: raise e @attribute5format.setter def attribute5format(self, attribute5format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute5format = attribute5format except Exception as e: raise e @property def attribute6(self) : ur"""Name of attribute6 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute6 except Exception as e: raise e @attribute6.setter def attribute6(self, attribute6) : ur"""Name of attribute6 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute6 = attribute6 except Exception as e: raise e @property def attribute6expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute6expr except Exception as e: raise e @attribute6expr.setter def attribute6expr(self, attribute6expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute6expr = attribute6expr except Exception as e: raise e @property def attribute6friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. 
""" try : return self._attribute6friendlyname except Exception as e: raise e @attribute6friendlyname.setter def attribute6friendlyname(self, attribute6friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute6friendlyname = attribute6friendlyname except Exception as e: raise e @property def attribute6format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute6format except Exception as e: raise e @attribute6format.setter def attribute6format(self, attribute6format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute6format = attribute6format except Exception as e: raise e @property def attribute7(self) : ur"""Name of attribute7 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute7 except Exception as e: raise e @attribute7.setter def attribute7(self, attribute7) : ur"""Name of attribute7 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute7 = attribute7 except Exception as e: raise e @property def attribute7expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute7expr except Exception as e: raise e @attribute7expr.setter def attribute7expr(self, attribute7expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute7expr = attribute7expr except Exception as e: raise e @property def attribute7friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute7friendlyname except Exception as e: raise e @attribute7friendlyname.setter def attribute7friendlyname(self, attribute7friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute7friendlyname = attribute7friendlyname except Exception as e: raise e @property def attribute7format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute7format except Exception as e: raise e @attribute7format.setter def attribute7format(self, attribute7format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute7format = attribute7format except Exception as e: raise e @property def attribute8(self) : ur"""Name of attribute8 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute8 except Exception as e: raise e @attribute8.setter def attribute8(self, attribute8) : ur"""Name of attribute8 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute8 = attribute8 except Exception as e: raise e @property def attribute8expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. 
""" try : return self._attribute8expr except Exception as e: raise e @attribute8expr.setter def attribute8expr(self, attribute8expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute8expr = attribute8expr except Exception as e: raise e @property def attribute8friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute8friendlyname except Exception as e: raise e @attribute8friendlyname.setter def attribute8friendlyname(self, attribute8friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute8friendlyname = attribute8friendlyname except Exception as e: raise e @property def attribute8format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute8format except Exception as e: raise e @attribute8format.setter def attribute8format(self, attribute8format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute8format = attribute8format except Exception as e: raise e @property def attribute9(self) : ur"""Name of attribute9 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute9 except Exception as e: raise e @attribute9.setter def attribute9(self, attribute9) : ur"""Name of attribute9 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute9 = attribute9 except Exception as e: raise e @property def attribute9expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute9expr except Exception as e: raise e @attribute9expr.setter def attribute9expr(self, attribute9expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute9expr = attribute9expr except Exception as e: raise e @property def attribute9friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute9friendlyname except Exception as e: raise e @attribute9friendlyname.setter def attribute9friendlyname(self, attribute9friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute9friendlyname = attribute9friendlyname except Exception as e: raise e @property def attribute9format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute9format except Exception as e: raise e @attribute9format.setter def attribute9format(self, attribute9format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute9format = attribute9format except Exception as e: raise e @property def attribute10(self) : ur"""Name of attribute10 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. 
""" try : return self._attribute10 except Exception as e: raise e @attribute10.setter def attribute10(self, attribute10) : ur"""Name of attribute10 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute10 = attribute10 except Exception as e: raise e @property def attribute10expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute10expr except Exception as e: raise e @attribute10expr.setter def attribute10expr(self, attribute10expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute10expr = attribute10expr except Exception as e: raise e @property def attribute10friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute10friendlyname except Exception as e: raise e @attribute10friendlyname.setter def attribute10friendlyname(self, attribute10friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute10friendlyname = attribute10friendlyname except Exception as e: raise e @property def attribute10format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute10format except Exception as e: raise e @attribute10format.setter def attribute10format(self, attribute10format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute10format = attribute10format except Exception as e: raise e @property def attribute11(self) : ur"""Name of attribute11 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute11 except Exception as e: raise e @attribute11.setter def attribute11(self, attribute11) : ur"""Name of attribute11 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute11 = attribute11 except Exception as e: raise e @property def attribute11expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute11expr except Exception as e: raise e @attribute11expr.setter def attribute11expr(self, attribute11expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute11expr = attribute11expr except Exception as e: raise e @property def attribute11friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute11friendlyname except Exception as e: raise e @attribute11friendlyname.setter def attribute11friendlyname(self, attribute11friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute11friendlyname = attribute11friendlyname except Exception as e: raise e @property def attribute11format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. 
""" try : return self._attribute11format except Exception as e: raise e @attribute11format.setter def attribute11format(self, attribute11format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute11format = attribute11format except Exception as e: raise e @property def attribute12(self) : ur"""Name of attribute12 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute12 except Exception as e: raise e @attribute12.setter def attribute12(self, attribute12) : ur"""Name of attribute12 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute12 = attribute12 except Exception as e: raise e @property def attribute12expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute12expr except Exception as e: raise e @attribute12expr.setter def attribute12expr(self, attribute12expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute12expr = attribute12expr except Exception as e: raise e @property def attribute12friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute12friendlyname except Exception as e: raise e @attribute12friendlyname.setter def attribute12friendlyname(self, attribute12friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute12friendlyname = attribute12friendlyname except Exception as e: raise e @property def attribute12format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute12format except Exception as e: raise e @attribute12format.setter def attribute12format(self, attribute12format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute12format = attribute12format except Exception as e: raise e @property def attribute13(self) : ur"""Name of attribute13 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute13 except Exception as e: raise e @attribute13.setter def attribute13(self, attribute13) : ur"""Name of attribute13 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute13 = attribute13 except Exception as e: raise e @property def attribute13expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute13expr except Exception as e: raise e @attribute13expr.setter def attribute13expr(self, attribute13expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute13expr = attribute13expr except Exception as e: raise e @property def attribute13friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. 
""" try : return self._attribute13friendlyname except Exception as e: raise e @attribute13friendlyname.setter def attribute13friendlyname(self, attribute13friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute13friendlyname = attribute13friendlyname except Exception as e: raise e @property def attribute13format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute13format except Exception as e: raise e @attribute13format.setter def attribute13format(self, attribute13format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute13format = attribute13format except Exception as e: raise e @property def attribute14(self) : ur"""Name of attribute14 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute14 except Exception as e: raise e @attribute14.setter def attribute14(self, attribute14) : ur"""Name of attribute14 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute14 = attribute14 except Exception as e: raise e @property def attribute14expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute14expr except Exception as e: raise e @attribute14expr.setter def attribute14expr(self, attribute14expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute14expr = attribute14expr except Exception as e: raise e @property def attribute14friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute14friendlyname except Exception as e: raise e @attribute14friendlyname.setter def attribute14friendlyname(self, attribute14friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute14friendlyname = attribute14friendlyname except Exception as e: raise e @property def attribute14format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute14format except Exception as e: raise e @attribute14format.setter def attribute14format(self, attribute14format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute14format = attribute14format except Exception as e: raise e @property def attribute15(self) : ur"""Name of attribute15 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute15 except Exception as e: raise e @attribute15.setter def attribute15(self, attribute15) : ur"""Name of attribute15 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute15 = attribute15 except Exception as e: raise e @property def attribute15expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. 
""" try : return self._attribute15expr except Exception as e: raise e @attribute15expr.setter def attribute15expr(self, attribute15expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute15expr = attribute15expr except Exception as e: raise e @property def attribute15friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute15friendlyname except Exception as e: raise e @attribute15friendlyname.setter def attribute15friendlyname(self, attribute15friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute15friendlyname = attribute15friendlyname except Exception as e: raise e @property def attribute15format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute15format except Exception as e: raise e @attribute15format.setter def attribute15format(self, attribute15format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute15format = attribute15format except Exception as e: raise e @property def attribute16(self) : ur"""Name of attribute16 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute16 except Exception as e: raise e @attribute16.setter def attribute16(self, attribute16) : ur"""Name of attribute16 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute16 = attribute16 except Exception as e: raise e @property def attribute16expr(self) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128. """ try : return self._attribute16expr except Exception as e: raise e @attribute16expr.setter def attribute16expr(self, attribute16expr) : ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128 """ try : self._attribute16expr = attribute16expr except Exception as e: raise e @property def attribute16friendlyname(self) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64. """ try : return self._attribute16friendlyname except Exception as e: raise e @attribute16friendlyname.setter def attribute16friendlyname(self, attribute16friendlyname) : ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64 """ try : self._attribute16friendlyname = attribute16friendlyname except Exception as e: raise e @property def attribute16format(self) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic. """ try : return self._attribute16format except Exception as e: raise e @attribute16format.setter def attribute16format(self, attribute16format) : ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic """ try : self._attribute16format = attribute16format except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(tmsamlssoprofile_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.tmsamlssoprofile except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : ur""" Use this API to add tmsamlssoprofile. """ try : if type(resource) is not list : addresource = tmsamlssoprofile() addresource.name = resource.name addresource.samlsigningcertname = resource.samlsigningcertname addresource.assertionconsumerserviceurl = resource.assertionconsumerserviceurl addresource.relaystaterule = resource.relaystaterule addresource.sendpassword = resource.sendpassword addresource.samlissuername = resource.samlissuername addresource.signaturealg = resource.signaturealg addresource.digestmethod = resource.digestmethod addresource.audience = resource.audience addresource.nameidformat = resource.nameidformat addresource.nameidexpr = resource.nameidexpr addresource.attribute1 = resource.attribute1 addresource.attribute1expr = resource.attribute1expr addresource.attribute1friendlyname = resource.attribute1friendlyname addresource.attribute1format = resource.attribute1format addresource.attribute2 = resource.attribute2 addresource.attribute2expr = resource.attribute2expr addresource.attribute2friendlyname = resource.attribute2friendlyname addresource.attribute2format = resource.attribute2format addresource.attribute3 = resource.attribute3 addresource.attribute3expr = resource.attribute3expr addresource.attribute3friendlyname = resource.attribute3friendlyname addresource.attribute3format = resource.attribute3format addresource.attribute4 = resource.attribute4 addresource.attribute4expr = resource.attribute4expr addresource.attribute4friendlyname = resource.attribute4friendlyname addresource.attribute4format = resource.attribute4format addresource.attribute5 = resource.attribute5 addresource.attribute5expr = resource.attribute5expr addresource.attribute5friendlyname = resource.attribute5friendlyname addresource.attribute5format = resource.attribute5format addresource.attribute6 = resource.attribute6 addresource.attribute6expr = resource.attribute6expr addresource.attribute6friendlyname = resource.attribute6friendlyname addresource.attribute6format = resource.attribute6format addresource.attribute7 = resource.attribute7 addresource.attribute7expr = resource.attribute7expr addresource.attribute7friendlyname = resource.attribute7friendlyname addresource.attribute7format = resource.attribute7format addresource.attribute8 = resource.attribute8 addresource.attribute8expr = resource.attribute8expr addresource.attribute8friendlyname = resource.attribute8friendlyname addresource.attribute8format = resource.attribute8format addresource.attribute9 = resource.attribute9 addresource.attribute9expr = resource.attribute9expr addresource.attribute9friendlyname = resource.attribute9friendlyname addresource.attribute9format = resource.attribute9format addresource.attribute10 = resource.attribute10 addresource.attribute10expr = resource.attribute10expr 
addresource.attribute10friendlyname = resource.attribute10friendlyname addresource.attribute10format = resource.attribute10format addresource.attribute11 = resource.attribute11 addresource.attribute11expr = resource.attribute11expr addresource.attribute11friendlyname = resource.attribute11friendlyname addresource.attribute11format = resource.attribute11format addresource.attribute12 = resource.attribute12 addresource.attribute12expr = resource.attribute12expr addresource.attribute12friendlyname = resource.attribute12friendlyname addresource.attribute12format = resource.attribute12format addresource.attribute13 = resource.attribute13 addresource.attribute13expr = resource.attribute13expr addresource.attribute13friendlyname = resource.attribute13friendlyname addresource.attribute13format = resource.attribute13format addresource.attribute14 = resource.attribute14 addresource.attribute14expr = resource.attribute14expr addresource.attribute14friendlyname = resource.attribute14friendlyname addresource.attribute14format = resource.attribute14format addresource.attribute15 = resource.attribute15 addresource.attribute15expr = resource.attribute15expr addresource.attribute15friendlyname = resource.attribute15friendlyname addresource.attribute15format = resource.attribute15format addresource.attribute16 = resource.attribute16 addresource.attribute16expr = resource.attribute16expr addresource.attribute16friendlyname = resource.attribute16friendlyname addresource.attribute16format = resource.attribute16format return addresource.add_resource(client) else : if (resource and len(resource) > 0) : addresources = [ tmsamlssoprofile() for _ in range(len(resource))] for i in range(len(resource)) : addresources[i].name = resource[i].name addresources[i].samlsigningcertname = resource[i].samlsigningcertname addresources[i].assertionconsumerserviceurl = resource[i].assertionconsumerserviceurl addresources[i].relaystaterule = resource[i].relaystaterule addresources[i].sendpassword = resource[i].sendpassword addresources[i].samlissuername = resource[i].samlissuername addresources[i].signaturealg = resource[i].signaturealg addresources[i].digestmethod = resource[i].digestmethod addresources[i].audience = resource[i].audience addresources[i].nameidformat = resource[i].nameidformat addresources[i].nameidexpr = resource[i].nameidexpr addresources[i].attribute1 = resource[i].attribute1 addresources[i].attribute1expr = resource[i].attribute1expr addresources[i].attribute1friendlyname = resource[i].attribute1friendlyname addresources[i].attribute1format = resource[i].attribute1format addresources[i].attribute2 = resource[i].attribute2 addresources[i].attribute2expr = resource[i].attribute2expr addresources[i].attribute2friendlyname = resource[i].attribute2friendlyname addresources[i].attribute2format = resource[i].attribute2format addresources[i].attribute3 = resource[i].attribute3 addresources[i].attribute3expr = resource[i].attribute3expr addresources[i].attribute3friendlyname = resource[i].attribute3friendlyname addresources[i].attribute3format = resource[i].attribute3format addresources[i].attribute4 = resource[i].attribute4 addresources[i].attribute4expr = resource[i].attribute4expr addresources[i].attribute4friendlyname = resource[i].attribute4friendlyname addresources[i].attribute4format = resource[i].attribute4format addresources[i].attribute5 = resource[i].attribute5 addresources[i].attribute5expr = resource[i].attribute5expr addresources[i].attribute5friendlyname = resource[i].attribute5friendlyname 
addresources[i].attribute5format = resource[i].attribute5format addresources[i].attribute6 = resource[i].attribute6 addresources[i].attribute6expr = resource[i].attribute6expr addresources[i].attribute6friendlyname = resource[i].attribute6friendlyname addresources[i].attribute6format = resource[i].attribute6format addresources[i].attribute7 = resource[i].attribute7 addresources[i].attribute7expr = resource[i].attribute7expr addresources[i].attribute7friendlyname = resource[i].attribute7friendlyname addresources[i].attribute7format = resource[i].attribute7format addresources[i].attribute8 = resource[i].attribute8 addresources[i].attribute8expr = resource[i].attribute8expr addresources[i].attribute8friendlyname = resource[i].attribute8friendlyname addresources[i].attribute8format = resource[i].attribute8format addresources[i].attribute9 = resource[i].attribute9 addresources[i].attribute9expr = resource[i].attribute9expr addresources[i].attribute9friendlyname = resource[i].attribute9friendlyname addresources[i].attribute9format = resource[i].attribute9format addresources[i].attribute10 = resource[i].attribute10 addresources[i].attribute10expr = resource[i].attribute10expr addresources[i].attribute10friendlyname = resource[i].attribute10friendlyname addresources[i].attribute10format = resource[i].attribute10format addresources[i].attribute11 = resource[i].attribute11 addresources[i].attribute11expr = resource[i].attribute11expr addresources[i].attribute11friendlyname = resource[i].attribute11friendlyname addresources[i].attribute11format = resource[i].attribute11format addresources[i].attribute12 = resource[i].attribute12 addresources[i].attribute12expr = resource[i].attribute12expr addresources[i].attribute12friendlyname = resource[i].attribute12friendlyname addresources[i].attribute12format = resource[i].attribute12format addresources[i].attribute13 = resource[i].attribute13 addresources[i].attribute13expr = resource[i].attribute13expr addresources[i].attribute13friendlyname = resource[i].attribute13friendlyname addresources[i].attribute13format = resource[i].attribute13format addresources[i].attribute14 = resource[i].attribute14 addresources[i].attribute14expr = resource[i].attribute14expr addresources[i].attribute14friendlyname = resource[i].attribute14friendlyname addresources[i].attribute14format = resource[i].attribute14format addresources[i].attribute15 = resource[i].attribute15 addresources[i].attribute15expr = resource[i].attribute15expr addresources[i].attribute15friendlyname = resource[i].attribute15friendlyname addresources[i].attribute15format = resource[i].attribute15format addresources[i].attribute16 = resource[i].attribute16 addresources[i].attribute16expr = resource[i].attribute16expr addresources[i].attribute16friendlyname = resource[i].attribute16friendlyname addresources[i].attribute16format = resource[i].attribute16format result = cls.add_bulk_request(client, addresources) return result except Exception as e : raise e @classmethod def delete(cls, client, resource) : ur""" Use this API to delete tmsamlssoprofile. 
""" try : if type(resource) is not list : deleteresource = tmsamlssoprofile() if type(resource) != type(deleteresource): deleteresource.name = resource else : deleteresource.name = resource.name return deleteresource.delete_resource(client) else : if type(resource[0]) != cls : if (resource and len(resource) > 0) : deleteresources = [ tmsamlssoprofile() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].name = resource[i] else : if (resource and len(resource) > 0) : deleteresources = [ tmsamlssoprofile() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].name = resource[i].name result = cls.delete_bulk_request(client, deleteresources) return result except Exception as e : raise e @classmethod def update(cls, client, resource) : ur""" Use this API to update tmsamlssoprofile. """ try : if type(resource) is not list : updateresource = tmsamlssoprofile() updateresource.name = resource.name updateresource.samlsigningcertname = resource.samlsigningcertname updateresource.assertionconsumerserviceurl = resource.assertionconsumerserviceurl updateresource.sendpassword = resource.sendpassword updateresource.samlissuername = resource.samlissuername updateresource.relaystaterule = resource.relaystaterule updateresource.signaturealg = resource.signaturealg updateresource.digestmethod = resource.digestmethod updateresource.audience = resource.audience updateresource.nameidformat = resource.nameidformat updateresource.nameidexpr = resource.nameidexpr updateresource.attribute1 = resource.attribute1 updateresource.attribute1expr = resource.attribute1expr updateresource.attribute1friendlyname = resource.attribute1friendlyname updateresource.attribute1format = resource.attribute1format updateresource.attribute2 = resource.attribute2 updateresource.attribute2expr = resource.attribute2expr updateresource.attribute2friendlyname = resource.attribute2friendlyname updateresource.attribute2format = resource.attribute2format updateresource.attribute3 = resource.attribute3 updateresource.attribute3expr = resource.attribute3expr updateresource.attribute3friendlyname = resource.attribute3friendlyname updateresource.attribute3format = resource.attribute3format updateresource.attribute4 = resource.attribute4 updateresource.attribute4expr = resource.attribute4expr updateresource.attribute4friendlyname = resource.attribute4friendlyname updateresource.attribute4format = resource.attribute4format updateresource.attribute5 = resource.attribute5 updateresource.attribute5expr = resource.attribute5expr updateresource.attribute5friendlyname = resource.attribute5friendlyname updateresource.attribute5format = resource.attribute5format updateresource.attribute6 = resource.attribute6 updateresource.attribute6expr = resource.attribute6expr updateresource.attribute6friendlyname = resource.attribute6friendlyname updateresource.attribute6format = resource.attribute6format updateresource.attribute7 = resource.attribute7 updateresource.attribute7expr = resource.attribute7expr updateresource.attribute7friendlyname = resource.attribute7friendlyname updateresource.attribute7format = resource.attribute7format updateresource.attribute8 = resource.attribute8 updateresource.attribute8expr = resource.attribute8expr updateresource.attribute8friendlyname = resource.attribute8friendlyname updateresource.attribute8format = resource.attribute8format updateresource.attribute9 = resource.attribute9 updateresource.attribute9expr = resource.attribute9expr updateresource.attribute9friendlyname = 
resource.attribute9friendlyname updateresource.attribute9format = resource.attribute9format updateresource.attribute10 = resource.attribute10 updateresource.attribute10expr = resource.attribute10expr updateresource.attribute10friendlyname = resource.attribute10friendlyname updateresource.attribute10format = resource.attribute10format updateresource.attribute11 = resource.attribute11 updateresource.attribute11expr = resource.attribute11expr updateresource.attribute11friendlyname = resource.attribute11friendlyname updateresource.attribute11format = resource.attribute11format updateresource.attribute12 = resource.attribute12 updateresource.attribute12expr = resource.attribute12expr updateresource.attribute12friendlyname = resource.attribute12friendlyname updateresource.attribute12format = resource.attribute12format updateresource.attribute13 = resource.attribute13 updateresource.attribute13expr = resource.attribute13expr updateresource.attribute13friendlyname = resource.attribute13friendlyname updateresource.attribute13format = resource.attribute13format updateresource.attribute14 = resource.attribute14 updateresource.attribute14expr = resource.attribute14expr updateresource.attribute14friendlyname = resource.attribute14friendlyname updateresource.attribute14format = resource.attribute14format updateresource.attribute15 = resource.attribute15 updateresource.attribute15expr = resource.attribute15expr updateresource.attribute15friendlyname = resource.attribute15friendlyname updateresource.attribute15format = resource.attribute15format updateresource.attribute16 = resource.attribute16 updateresource.attribute16expr = resource.attribute16expr updateresource.attribute16friendlyname = resource.attribute16friendlyname updateresource.attribute16format = resource.attribute16format return updateresource.update_resource(client) else : if (resource and len(resource) > 0) : updateresources = [ tmsamlssoprofile() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].name = resource[i].name updateresources[i].samlsigningcertname = resource[i].samlsigningcertname updateresources[i].assertionconsumerserviceurl = resource[i].assertionconsumerserviceurl updateresources[i].sendpassword = resource[i].sendpassword updateresources[i].samlissuername = resource[i].samlissuername updateresources[i].relaystaterule = resource[i].relaystaterule updateresources[i].signaturealg = resource[i].signaturealg updateresources[i].digestmethod = resource[i].digestmethod updateresources[i].audience = resource[i].audience updateresources[i].nameidformat = resource[i].nameidformat updateresources[i].nameidexpr = resource[i].nameidexpr updateresources[i].attribute1 = resource[i].attribute1 updateresources[i].attribute1expr = resource[i].attribute1expr updateresources[i].attribute1friendlyname = resource[i].attribute1friendlyname updateresources[i].attribute1format = resource[i].attribute1format updateresources[i].attribute2 = resource[i].attribute2 updateresources[i].attribute2expr = resource[i].attribute2expr updateresources[i].attribute2friendlyname = resource[i].attribute2friendlyname updateresources[i].attribute2format = resource[i].attribute2format updateresources[i].attribute3 = resource[i].attribute3 updateresources[i].attribute3expr = resource[i].attribute3expr updateresources[i].attribute3friendlyname = resource[i].attribute3friendlyname updateresources[i].attribute3format = resource[i].attribute3format updateresources[i].attribute4 = resource[i].attribute4 updateresources[i].attribute4expr = 
resource[i].attribute4expr updateresources[i].attribute4friendlyname = resource[i].attribute4friendlyname updateresources[i].attribute4format = resource[i].attribute4format updateresources[i].attribute5 = resource[i].attribute5 updateresources[i].attribute5expr = resource[i].attribute5expr updateresources[i].attribute5friendlyname = resource[i].attribute5friendlyname updateresources[i].attribute5format = resource[i].attribute5format updateresources[i].attribute6 = resource[i].attribute6 updateresources[i].attribute6expr = resource[i].attribute6expr updateresources[i].attribute6friendlyname = resource[i].attribute6friendlyname updateresources[i].attribute6format = resource[i].attribute6format updateresources[i].attribute7 = resource[i].attribute7 updateresources[i].attribute7expr = resource[i].attribute7expr updateresources[i].attribute7friendlyname = resource[i].attribute7friendlyname updateresources[i].attribute7format = resource[i].attribute7format updateresources[i].attribute8 = resource[i].attribute8 updateresources[i].attribute8expr = resource[i].attribute8expr updateresources[i].attribute8friendlyname = resource[i].attribute8friendlyname updateresources[i].attribute8format = resource[i].attribute8format updateresources[i].attribute9 = resource[i].attribute9 updateresources[i].attribute9expr = resource[i].attribute9expr updateresources[i].attribute9friendlyname = resource[i].attribute9friendlyname updateresources[i].attribute9format = resource[i].attribute9format updateresources[i].attribute10 = resource[i].attribute10 updateresources[i].attribute10expr = resource[i].attribute10expr updateresources[i].attribute10friendlyname = resource[i].attribute10friendlyname updateresources[i].attribute10format = resource[i].attribute10format updateresources[i].attribute11 = resource[i].attribute11 updateresources[i].attribute11expr = resource[i].attribute11expr updateresources[i].attribute11friendlyname = resource[i].attribute11friendlyname updateresources[i].attribute11format = resource[i].attribute11format updateresources[i].attribute12 = resource[i].attribute12 updateresources[i].attribute12expr = resource[i].attribute12expr updateresources[i].attribute12friendlyname = resource[i].attribute12friendlyname updateresources[i].attribute12format = resource[i].attribute12format updateresources[i].attribute13 = resource[i].attribute13 updateresources[i].attribute13expr = resource[i].attribute13expr updateresources[i].attribute13friendlyname = resource[i].attribute13friendlyname updateresources[i].attribute13format = resource[i].attribute13format updateresources[i].attribute14 = resource[i].attribute14 updateresources[i].attribute14expr = resource[i].attribute14expr updateresources[i].attribute14friendlyname = resource[i].attribute14friendlyname updateresources[i].attribute14format = resource[i].attribute14format updateresources[i].attribute15 = resource[i].attribute15 updateresources[i].attribute15expr = resource[i].attribute15expr updateresources[i].attribute15friendlyname = resource[i].attribute15friendlyname updateresources[i].attribute15format = resource[i].attribute15format updateresources[i].attribute16 = resource[i].attribute16 updateresources[i].attribute16expr = resource[i].attribute16expr updateresources[i].attribute16friendlyname = resource[i].attribute16friendlyname updateresources[i].attribute16format = resource[i].attribute16format result = cls.update_bulk_request(client, updateresources) return result except Exception as e : raise e @classmethod def unset(cls, client, resource, args) : 
ur""" Use this API to unset the properties of tmsamlssoprofile resource. Properties that need to be unset are specified in args array. """ try : if type(resource) is not list : unsetresource = tmsamlssoprofile() if type(resource) != type(unsetresource): unsetresource.name = resource else : unsetresource.name = resource.name return unsetresource.unset_resource(client, args) else : if type(resource[0]) != cls : if (resource and len(resource) > 0) : unsetresources = [ tmsamlssoprofile() for _ in range(len(resource))] for i in range(len(resource)) : unsetresources[i].name = resource[i] else : if (resource and len(resource) > 0) : unsetresources = [ tmsamlssoprofile() for _ in range(len(resource))] for i in range(len(resource)) : unsetresources[i].name = resource[i].name result = cls.unset_bulk_request(client, unsetresources, args) return result except Exception as e : raise e @classmethod def get(cls, client, name="", option_="") : ur""" Use this API to fetch all the tmsamlssoprofile resources that are configured on netscaler. """ try : if not name : obj = tmsamlssoprofile() response = obj.get_resources(client, option_) else : if type(name) != cls : if type(name) is not list : obj = tmsamlssoprofile() obj.name = name response = obj.get_resource(client, option_) else : if name and len(name) > 0 : response = [tmsamlssoprofile() for _ in range(len(name))] obj = [tmsamlssoprofile() for _ in range(len(name))] for i in range(len(name)) : obj[i] = tmsamlssoprofile() obj[i].name = name[i] response[i] = obj[i].get_resource(client, option_) return response except Exception as e : raise e @classmethod def get_filtered(cls, client, filter_) : ur""" Use this API to fetch filtered set of tmsamlssoprofile resources. filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = tmsamlssoprofile() option_ = options() option_.filter = filter_ response = obj.getfiltered(client, option_) return response except Exception as e : raise e @classmethod def count(cls, client) : ur""" Use this API to count the tmsamlssoprofile resources configured on NetScaler. """ try : obj = tmsamlssoprofile() option_ = options() option_.count = True response = obj.get_resources(client, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e : raise e @classmethod def count_filtered(cls, client, filter_) : ur""" Use this API to count filtered the set of tmsamlssoprofile resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". 
""" try : obj = tmsamlssoprofile() option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(client, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e : raise e class Attribute3format: URI = "URI" Basic = "Basic" class Sendpassword: ON = "ON" OFF = "OFF" class Attribute6format: URI = "URI" Basic = "Basic" class Attribute10format: URI = "URI" Basic = "Basic" class Attribute9format: URI = "URI" Basic = "Basic" class Nameidformat: Unspecified = "Unspecified" emailAddress = "emailAddress" X509SubjectName = "X509SubjectName" WindowsDomainQualifiedName = "WindowsDomainQualifiedName" kerberos = "kerberos" entity = "entity" persistent = "persistent" Transient = "transient" class Signaturealg: RSA_SHA1 = "RSA-SHA1" RSA_SHA256 = "RSA-SHA256" class Attribute1format: URI = "URI" Basic = "Basic" class Attribute12format: URI = "URI" Basic = "Basic" class Attribute8format: URI = "URI" Basic = "Basic" class Attribute5format: URI = "URI" Basic = "Basic" class Attribute7format: URI = "URI" Basic = "Basic" class Attribute15format: URI = "URI" Basic = "Basic" class Digestmethod: SHA1 = "SHA1" SHA256 = "SHA256" class Attribute2format: URI = "URI" Basic = "Basic" class Attribute4format: URI = "URI" Basic = "Basic" class Attribute13format: URI = "URI" Basic = "Basic" class Attribute14format: URI = "URI" Basic = "Basic" class Attribute16format: URI = "URI" Basic = "Basic" class Attribute11format: URI = "URI" Basic = "Basic" class tmsamlssoprofile_response(base_response) : def __init__(self, length=1) : self.tmsamlssoprofile = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.tmsamlssoprofile = [tmsamlssoprofile() for _ in range(length)]
apache-2.0
askeing/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/hsts_wsh.py
486
1784
# Copyright 2013, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. def web_socket_do_extra_handshake(request): request.extra_headers.append( ('Strict-Transport-Security', 'max-age=86400')) def web_socket_transfer_data(request): request.ws_stream.send_message('Hello', binary=False) # vi:sts=4 sw=4 et
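A hypothetical client-side check for the handler above. It uses the third-party websocket-client package rather than pywebsocket's own example clients (an assumption), and the host, port, and resource path are placeholders.

# Sketch of exercising the hsts handler; values are placeholders.
import websocket

ws = websocket.create_connection("wss://localhost:10443/hsts")
print(ws.recv())   # the handler sends 'Hello' as a text message
# The Strict-Transport-Security header is added to the handshake
# response, so verify it via the response headers (e.g. ws.getheaders()
# in websocket-client) or a packet capture.
ws.close()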
mpl-2.0
siddharths067/HuHubaProject
lib/urllib3/util/__init__.py
204
1044
from __future__ import absolute_import

# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
    SSLContext,
    HAS_SNI,
    IS_PYOPENSSL,
    IS_SECURETRANSPORT,
    assert_fingerprint,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .timeout import (
    current_time,
    Timeout,
)
from .retry import Retry
from .url import (
    get_host,
    parse_url,
    split_first,
    Url,
)
from .wait import (
    wait_for_read,
    wait_for_write
)

__all__ = (
    'HAS_SNI',
    'IS_PYOPENSSL',
    'IS_SECURETRANSPORT',
    'SSLContext',
    'Retry',
    'Timeout',
    'Url',
    'assert_fingerprint',
    'current_time',
    'is_connection_dropped',
    'is_fp_closed',
    'get_host',
    'parse_url',
    'make_headers',
    'resolve_cert_reqs',
    'resolve_ssl_version',
    'split_first',
    'ssl_wrap_socket',
    'wait_for_read',
    'wait_for_write'
)
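An illustrative sketch of the re-exported helpers above in use; the URL, header, and policy values are placeholders.

# Demonstrates urllib3's public utility surface as re-exported above.
from urllib3.util import parse_url, make_headers, Timeout, Retry

url = parse_url("https://example.com:8443/path?q=1")
print(url.scheme, url.host, url.port, url.request_uri)

headers = make_headers(keep_alive=True, user_agent="demo/1.0",
                       basic_auth="user:passwd")   # placeholder values

timeout = Timeout(connect=2.0, read=7.0)    # separate connect/read limits
retry = Retry(total=3, backoff_factor=0.3)  # retry policy for a PoolManager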
mit
SRabbelier/Melange
thirdparty/google_appengine/google/appengine/ext/bulkload/transform.py
3
15896
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Bulkloader Transform Helper functions. A collection of helper functions for bulkloading data, typically referenced from a bulkloader.yaml file. """ import base64 import datetime import os import re import tempfile from google.appengine.api import datastore from google.appengine.api import datastore_types from google.appengine.ext.bulkload import bulkloader_errors CURRENT_PROPERTY = None KEY_TYPE_NAME = 'name' KEY_TYPE_ID = 'ID' # Decorators def none_if_empty(fn): """A decorator which returns None if its input is empty else fn(x). Useful on import. Can be used in config files (e.g. "transform.none_if_empty(int)" or as a decorator. Args: fn: Single argument transform function. Returns: Wrapped function. """ def wrapper(value): if value == '' or value is None or value == []: return None return fn(value) return wrapper def empty_if_none(fn): """A wrapper for a value to return '' if it's None. Useful on export. Can be used in config files (e.g. "transform.empty_if_none(unicode)" or as a decorator. Args: fn: Single argument transform function. Returns: Wrapped function. """ def wrapper(value): if value is None: return '' return fn(value) return wrapper # Key helpers. def create_foreign_key(kind, key_is_id=False): """A method to make one-level Key objects. These are typically used in ReferenceProperty in Python, where the reference value is a key with kind (or model) name name. This helper method does not support keys with parents. Use create_deep_key instead to create keys with parents. Args: kind: The kind name of the reference as a string. key_is_id: If true, convert the key into an integer to be used as an id. If false, leave the key in the input format (typically a string). Returns: Single argument method which parses a value into a Key of kind entity_kind. """ def generate_foreign_key_lambda(value): if key_is_id: value = int(value) return datastore.Key.from_path(kind, value) return generate_foreign_key_lambda def create_deep_key(*path_info): """A method to make multi-level Key objects. Generates multi-level key from multiple fields in the input dictionary. This is typically used for Keys for entities which have variable parent keys, e.g. ones with owned relationships. It can used for both __key__ and references. Use create_foreign_key as a simpler way to create single level keys. Args: path_info: List of tuples, describing (kind, property, is_id=False). kind: The kind name. property: The external property in the current import dictionary, or transform.CURRENT_PROPERTY for the value passed to the transform. is_id: Converts value to int and treats as numeric ID if True, otherwise the value is a string name. Default is False. Example: create_deep_key(('rootkind', 'rootcolumn'), ('childkind', 'childcolumn', True), ('leafkind', transform.CURRENT_PROPERTY)) Returns: Transform method which parses the info from the current neutral dictionary into a Key with parents as described by path_info. 
""" validated_path_info = [] for level_info in path_info: if len(level_info) == 3: key_is_id = level_info[2] elif len(level_info) == 2: key_is_id = False else: raise bulkloader_errors.InvalidConfiguration( 'Each list in create_deep_key must specify exactly 2 or 3 ' 'parameters, (kind, property, is_id=False). You specified: %s' % repr(path_info)) kind_name = level_info[0] property_name = level_info[1] validated_path_info.append((kind_name, property_name, key_is_id)) def create_deep_key_lambda(value, bulkload_state): path = [] for kind_name, property_name, key_is_id in validated_path_info: if property_name is CURRENT_PROPERTY: name_or_id = value else: name_or_id = bulkload_state.current_dictionary[property_name] if key_is_id: name_or_id = int(name_or_id) path += [kind_name, name_or_id] return datastore.Key.from_path(*path) return create_deep_key_lambda def _key_id_or_name_n(key, index): """Internal helper function for key id and name transforms. Args: key: A datastore key. index: The depth in the key to return; 0 is root, -1 is leaf. Returns: The id or name of the nth deep sub key in key. """ if not key: return None path = key.to_path() if not path: return None path_index = (index * 2) + 1 return path[path_index] def key_id_or_name_as_string_n(index): """Pull out the nth (0-based) key id or name from a key which has parents. If a key is present, return its id or name as a string. Note that this loses the distinction between integer IDs and strings which happen to look like integers. Use key_type to distinguish them. This is a useful complement to create_deep_key. Args: index: The depth of the id or name to extract. Zero is the root key. Negative one is the leaf key. Returns: Function extracting the name or ID of the key at depth index, as a unicode string. Returns '' if key is empty (unsaved), otherwise raises IndexError if the key is not as deep as described. """ def transform_function(key): id_or_name = _key_id_or_name_n(key, index) if not id_or_name: return u'' return unicode(id_or_name) return transform_function # # Commonly used helper which returns the value of the leaf key. key_id_or_name_as_string = key_id_or_name_as_string_n(-1) def key_type_n(index): """Pull out the nth (0-based) key type from a key which has parents. This is most useful when paired with key_id_or_name_as_string_n. This is a useful complement to create_deep_key. Args: index: The depth of the id or name to extract. Zero is the root key. Negative one is the leaf key. Returns: Method returning the type ('ID' or 'name') of the key at depth index. Returns '' if key is empty (unsaved), otherwise raises IndexError if the key is not as deep as described. """ def transform_function(key): id_or_name = _key_id_or_name_n(key, index) if id_or_name is None: return '' if isinstance(id_or_name, basestring): return KEY_TYPE_NAME return KEY_TYPE_ID return transform_function # # Commonly used helper which returns the type of the leaf key. key_type = key_type_n(-1) def key_kind_n(index): """Pull out the nth (0-based) key kind from a key which has parents. This is a useful complement to create_deep_key. Args: index: The depth of the id or name to extract. Zero is the root key. Negative one is the leaf key. Returns: Function returning the kind of the key at depth index, or raising IndexError if the key is not as deep as described. 
""" @empty_if_none def transform_function(key): path = key.to_path() path_index = (index * 2) return unicode(path[path_index]) return transform_function # Commonly used helper which returns the kind of the leaf key. key_kind = key_kind_n(-1) # Blob and ByteString helpers. @none_if_empty def blobproperty_from_base64(value): """Return a datastore blob property containing the base64 decoded value.""" decoded_value = base64.b64decode(value) return datastore_types.Blob(decoded_value) @none_if_empty def bytestring_from_base64(value): """Return a datastore bytestring property from a base64 encoded value.""" decoded_value = base64.b64decode(value) return datastore_types.ByteString(decoded_value) def blob_to_file(filename_hint_propertyname=None, directory_hint=''): """Write the blob contents to a file, and replace them with the filename. Args: filename_hint_propertyname: If present, the filename will begin with the contents of this value in the entity being exported. directory_hint: If present, the files will be stored in this directory. Returns: A function which writes the input blob to a file. """ directory = [] def transform_function(value, bulkload_state): if not directory: parent_dir = os.path.dirname(bulkload_state.filename) directory.append(os.path.join(parent_dir, directory_hint)) if directory[0] and not os.path.exists(directory[0]): os.makedirs(directory[0]) filename_hint = 'blob_' suffix = '' filename = '' if filename_hint_propertyname: filename_hint = bulkload_state.current_entity[filename_hint_propertyname] filename = os.path.join(directory[0], filename_hint) if os.path.exists(filename): filename = '' (filename_hint, suffix) = os.path.splitext(filename_hint) if not filename: filename = tempfile.mktemp(suffix, filename_hint, directory[0]) f = open(filename, 'wb') f.write(value) f.close() return filename return transform_function # Formatted string helpers: Extract, convert to boolean, date, or list. def import_date_time(format, _strptime=None): """A wrapper around strptime. Also returns None if the input is empty. Args: format: Format string for strptime. Returns: Single argument method which parses a string into a datetime using format. """ if not _strptime: _strptime = datetime.datetime.strptime def import_date_time_lambda(value): if not value: return None return _strptime(value, format) return import_date_time_lambda def export_date_time(format): """A wrapper around strftime. Also returns '' if the input is None. Args: format: Format string for strftime. Returns: Single argument method which convers a datetime into a string using format. """ def export_date_time_lambda(value): if not value: return '' return datetime.datetime.strftime(value, format) return export_date_time_lambda def regexp_extract(pattern, method=re.match, group=1): """Return first group in the value matching the pattern using re.match. Args: pattern: A regular expression to match on with at least one group. method: The method to use for matching; normally re.match or re.search. group: The group to use for extracting a value. Returns: A single argument method which returns the group_arg group matched, or None if no match was found or the input was empty. """ def regexp_extract_lambda(value): if not value: return None matches = method(pattern, value) if not matches: return None return matches.group(group) return regexp_extract_lambda def regexp_to_list(pattern): """Return function that returns a list of objects that match the regex. Useful on import. 
Uses the provided regex to split a string value into a list of strings. Wrapped by none_if_input_or_result_empty, so returns none if there are no matches for the regex and none if the input is empty. Args: pattern: A regular expression pattern to match against the input string. Returns: None if the input was none or no matches were found, otherwise a list of strings matching the input expression. """ @none_if_empty def regexp_to_list_lambda(value): result = re.findall(pattern, value) if result == []: return None return result return regexp_to_list_lambda def regexp_bool(regexp, flags=0): """Return a boolean if the expression matches with re.match. Note that re.match anchors at the start but not end of the string. Args: regexp: String, regular expression. flags: Optional flags to pass to re.match. Returns: Method which returns a Boolean if the expression matches. """ def transform_function(value): return bool(re.match(regexp, value, flags)) return transform_function def split_string(delimeter): """Split a string using the delimeter into a list. This is just a wrapper for string.split. Args: delimeter: The delimiter to split the string on. Returns: Method which splits the string into a list along the delimeter. """ def split_string_lambda(value): return value.split(delimeter) return split_string_lambda def join_list(delimeter): """Join a list into a string using the delimeter. This is just a wrapper for string.join. Args: delimeter: The delimiter to use when joining the string. Returns: Method which joins the list into a string with the delimeter. """ def join_string_lambda(value): return delimeter.join(value) return join_string_lambda def list_from_multiproperty(*external_names): """Create a list from multiple properties. Args: external_names: List of the properties to use. Returns: Transform function which returns a list of the properties in external_names. """ def list_from_multiproperty_lambda(unused_value, bulkload_state): result = [] for external_name in external_names: value = bulkload_state.current_dictionary.get(external_name) if value: result.append(value) return result return list_from_multiproperty_lambda def property_from_list(index): """Return the Nth item from a list, or '' if the list is shorter. Args: index: Item in the list to return. Returns: Function returning the item from a list, or '' if the list is too short. """ @empty_if_none def property_from_list_lambda(values): if len(values) > index: return values[index] return '' return property_from_list_lambda # SimpleXML list Helpers def list_from_child_node(xpath, suppress_blank=False): """Return a list property from child nodes of the current xml node. This applies only the simplexml helper, as it assumes __node__, the current ElementTree node corresponding to the import record. Sample usage for structure: <Visit> <VisitActivities> <Activity>A1</Activity> <Activity>A2</Activity> </VisitActivities> </Visit> property: activities external_name: VisitActivities # Ignored on import, used on export. import_transform: list_from_xml_node('VisitActivities/Activity') export_transform: child_node_from_list('Activity') Args: xpath: XPath to run on the current node. suppress_blank: if True, ndoes with no text will be skipped. Returns: Transform function which works as described in the args. 
""" def list_from_child_node_lambda(unused_value, bulkload_state): result = [] for node in bulkload_state.current_dictionary['__node__'].findall(xpath): if node.text: result.append(node.text) elif not suppress_blank: result.append('') return result return list_from_child_node_lambda def child_node_from_list(child_node_name): """Return a value suitable for generating an XML child node on export. The return value is a list of tuples which the simplexml connector will use to build a child node. See also list_from_child_node Args: child_node_name: The name to use for each child node. Returns: Transform function which works as described in the args. """ def child_node_from_list_lambda(values): return [(child_node_name, value) for value in values] return child_node_from_list_lambda
apache-2.0
anaran/olympia
apps/amo/management/commands/clean_redis.py
9
3337
import logging import os import socket import subprocess import sys import tempfile import time from django.core.management.base import BaseCommand import redisutils import redis as redislib log = logging.getLogger('z.redis') # We process the keys in chunks of size CHUNK. CHUNK = 3000 # Remove any sets with less than MIN or more than MAX elements. MIN = 10 MAX = 50 # Expire keys after EXPIRE seconds. EXPIRE = 60 * 5 # Calling redis can raise raise these errors. RedisError = redislib.RedisError, socket.error def vacuum(master, slave): def keys(): ks = slave.keys() log.info('There are %s keys to clean up.' % len(ks)) ks = iter(ks) while 1: buffer = [] for _ in xrange(CHUNK): try: buffer.append(ks.next()) except StopIteration: yield buffer return yield buffer tmp = tempfile.NamedTemporaryFile(delete=False) for ks in keys(): tmp.write('\n'.join(ks)) tmp.close() # It's hard to get Python to clean up the memory from slave.keys(), so # we'll let the OS do it. You have to pass sys.executable both as the # thing to run and so argv[0] is set properly. os.execl(sys.executable, sys.executable, sys.argv[0], sys.argv[1], tmp.name) def cleanup(master, slave, filename): tmp = open(filename) total = [1, 0] p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE) total[0] = int(p.communicate()[0].strip().split()[0]) def file_keys(): while 1: buffer = [] for _ in xrange(CHUNK): line = tmp.readline() if line: buffer.append(line.strip()) else: yield buffer return yield buffer num = 0 for ks in file_keys(): pipe = slave.pipeline() for k in ks: pipe.scard(k) try: drop = [k for k, size in zip(ks, pipe.execute()) if 0 < size < MIN or size > MAX] except RedisError: continue num += len(ks) percent = round(float(num) / total[0] * 100, 1) if total[0] else 0 total[1] += len(drop) log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop))) pipe = master.pipeline() for k in drop: pipe.expire(k, EXPIRE) try: pipe.execute() except RedisError: continue time.sleep(1) # Poor man's rate limiting. if total[0]: log.info('Dropped %s keys [%.1f%%].' % (total[1], round(float(total[1]) / total[0] * 100, 1))) class Command(BaseCommand): help = "Clean up the redis used by cache machine." def handle(self, *args, **kw): try: master = redisutils.connections['cache'] slave = redisutils.connections['cache_slave'] except Exception: log.error('Could not connect to redis.', exc_info=True) return if args: filename = args[0] try: cleanup(master, slave, filename) finally: os.unlink(filename) else: vacuum(master, slave)
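A short usage note for the command above, sketched as comments; the temp-file path is a placeholder.

# Hypothetical invocations of the management command above.
# The first pass scans the slave's keyspace, writes the keys to a temp
# file, then re-execs itself with that filename for the cleanup phase:
#
#   ./manage.py clean_redis
#
# Supplying a filename skips the scan and runs cleanup directly; the
# file is unlinked when the pass finishes:
#
#   ./manage.py clean_redis /tmp/tmpa1b2c3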
bsd-3-clause
dataxu/ansible
lib/ansible/parsing/yaml/dumper.py
90
2246
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import yaml

from ansible.module_utils.six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText
from ansible.vars.hostvars import HostVars


class AnsibleDumper(yaml.SafeDumper):
    '''
    A simple stub class that allows us to add representers
    for our overridden object types.
    '''
    pass


def represent_hostvars(self, data):
    return self.represent_dict(dict(data))


# Note: only want to represent the encrypted data
def represent_vault_encrypted_unicode(self, data):
    return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')


if PY3:
    represent_unicode = yaml.representer.SafeRepresenter.represent_str
else:
    represent_unicode = yaml.representer.SafeRepresenter.represent_unicode

AnsibleDumper.add_representer(
    AnsibleUnicode,
    represent_unicode,
)

AnsibleDumper.add_representer(
    AnsibleUnsafeText,
    represent_unicode,
)

AnsibleDumper.add_representer(
    HostVars,
    represent_hostvars,
)

AnsibleDumper.add_representer(
    AnsibleSequence,
    yaml.representer.SafeRepresenter.represent_list,
)

AnsibleDumper.add_representer(
    AnsibleMapping,
    yaml.representer.SafeRepresenter.represent_dict,
)

AnsibleDumper.add_representer(
    AnsibleVaultEncryptedUnicode,
    represent_vault_encrypted_unicode,
)
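A minimal sketch of the dumper in use: passing it to yaml.dump lets Ansible's wrapped string, list, and dict types serialize as plain YAML. The data below is a placeholder.

# Serializing data with the custom dumper defined above.
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper

data = {'hosts': ['web1', 'web2'], 'become': True}
print(yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=False))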
gpl-3.0
akiss77/servo
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_stream.py
446
6264
from __future__ import absolute_import, division, unicode_literals

from . import support  # flake8: noqa

import unittest
import codecs
from io import BytesIO

from six.moves import http_client

from html5lib.inputstream import (BufferedStream, HTMLInputStream,
                                  HTMLUnicodeInputStream, HTMLBinaryInputStream)


class BufferedStreamTest(unittest.TestCase):
    def test_basic(self):
        s = b"abc"
        fp = BufferedStream(BytesIO(s))
        read = fp.read(10)
        assert read == s

    def test_read_length(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert read1 == b"a"
        read2 = fp.read(2)
        assert read2 == b"bc"
        read3 = fp.read(3)
        assert read3 == b"def"
        read4 = fp.read(4)
        assert read4 == b""

    def test_tell(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert fp.tell() == 1
        read2 = fp.read(2)
        assert fp.tell() == 3
        read3 = fp.read(3)
        assert fp.tell() == 6
        read4 = fp.read(4)
        assert fp.tell() == 6

    def test_seek(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert read1 == b"a"
        fp.seek(0)
        read2 = fp.read(1)
        assert read2 == b"a"
        read3 = fp.read(2)
        assert read3 == b"bc"
        fp.seek(2)
        read4 = fp.read(2)
        assert read4 == b"cd"
        fp.seek(4)
        read5 = fp.read(2)
        assert read5 == b"ef"

    def test_seek_tell(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert fp.tell() == 1
        fp.seek(0)
        read2 = fp.read(1)
        assert fp.tell() == 1
        read3 = fp.read(2)
        assert fp.tell() == 3
        fp.seek(2)
        read4 = fp.read(2)
        assert fp.tell() == 4
        fp.seek(4)
        read5 = fp.read(2)
        assert fp.tell() == 6


class HTMLUnicodeInputStreamShortChunk(HTMLUnicodeInputStream):
    _defaultChunkSize = 2


class HTMLBinaryInputStreamShortChunk(HTMLBinaryInputStream):
    _defaultChunkSize = 2


class HTMLInputStreamTest(unittest.TestCase):
    def test_char_ascii(self):
        stream = HTMLInputStream(b"'", encoding='ascii')
        self.assertEqual(stream.charEncoding[0], 'ascii')
        self.assertEqual(stream.char(), "'")

    def test_char_utf8(self):
        stream = HTMLInputStream('\u2018'.encode('utf-8'), encoding='utf-8')
        self.assertEqual(stream.charEncoding[0], 'utf-8')
        self.assertEqual(stream.char(), '\u2018')

    def test_char_win1252(self):
        stream = HTMLInputStream("\xa9\xf1\u2019".encode('windows-1252'))
        self.assertEqual(stream.charEncoding[0], 'windows-1252')
        self.assertEqual(stream.char(), "\xa9")
        self.assertEqual(stream.char(), "\xf1")
        self.assertEqual(stream.char(), "\u2019")

    def test_bom(self):
        stream = HTMLInputStream(codecs.BOM_UTF8 + b"'")
        self.assertEqual(stream.charEncoding[0], 'utf-8')
        self.assertEqual(stream.char(), "'")

    def test_utf_16(self):
        stream = HTMLInputStream((' ' * 1025).encode('utf-16'))
        self.assertTrue(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'],
                        stream.charEncoding)
        self.assertEqual(len(stream.charsUntil(' ', True)), 1025)

    def test_newlines(self):
        stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\r\nccc\rddddxe")
        self.assertEqual(stream.position(), (1, 0))
        self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
        self.assertEqual(stream.position(), (3, 0))
        self.assertEqual(stream.charsUntil('x'), "ccc\ndddd")
        self.assertEqual(stream.position(), (4, 4))
        self.assertEqual(stream.charsUntil('e'), "x")
        self.assertEqual(stream.position(), (4, 5))

    def test_newlines2(self):
        size = HTMLUnicodeInputStream._defaultChunkSize
        stream = HTMLInputStream("\r" * size + "\n")
        self.assertEqual(stream.charsUntil('x'), "\n" * size)

    def test_position(self):
        stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\nccc\nddde\nf\ngh")
        self.assertEqual(stream.position(), (1, 0))
        self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
        self.assertEqual(stream.position(), (3, 0))
        stream.unget("\n")
        self.assertEqual(stream.position(), (2, 2))
        self.assertEqual(stream.charsUntil('c'), "\n")
        self.assertEqual(stream.position(), (3, 0))
        stream.unget("\n")
        self.assertEqual(stream.position(), (2, 2))
        self.assertEqual(stream.char(), "\n")
        self.assertEqual(stream.position(), (3, 0))
        self.assertEqual(stream.charsUntil('e'), "ccc\nddd")
        self.assertEqual(stream.position(), (4, 3))
        self.assertEqual(stream.charsUntil('h'), "e\nf\ng")
        self.assertEqual(stream.position(), (6, 1))

    def test_position2(self):
        stream = HTMLUnicodeInputStreamShortChunk("abc\nd")
        self.assertEqual(stream.position(), (1, 0))
        self.assertEqual(stream.char(), "a")
        self.assertEqual(stream.position(), (1, 1))
        self.assertEqual(stream.char(), "b")
        self.assertEqual(stream.position(), (1, 2))
        self.assertEqual(stream.char(), "c")
        self.assertEqual(stream.position(), (1, 3))
        self.assertEqual(stream.char(), "\n")
        self.assertEqual(stream.position(), (2, 0))
        self.assertEqual(stream.char(), "d")
        self.assertEqual(stream.position(), (2, 1))

    def test_python_issue_20007(self):
        """
        Make sure we have a work-around for Python bug #20007
        http://bugs.python.org/issue20007
        """
        class FakeSocket(object):
            def makefile(self, _mode, _bufsize=None):
                return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")

        source = http_client.HTTPResponse(FakeSocket())
        source.begin()
        stream = HTMLInputStream(source)
        self.assertEqual(stream.charsUntil(" "), "Text")


def buildTestSuite():
    return unittest.defaultTestLoader.loadTestsFromName(__name__)


def main():
    buildTestSuite()
    unittest.main()


if __name__ == '__main__':
    main()
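
# Editor's note: a minimal usage sketch of the API exercised above (not part
# of the original html5lib test module; the byte strings are made up for
# illustration). BufferedStream wraps a file-like object with seek/tell
# bookkeeping; HTMLInputStream layers encoding detection and line/column
# position tracking on top of raw bytes.
def _demo_inputstream():
    fp = BufferedStream(BytesIO(b"abcdef"))
    assert fp.read(2) == b"ab"  # reads advance the buffered position
    fp.seek(0)                  # rewinding is cheap; data is kept in memory
    assert fp.tell() == 0

    stream = HTMLInputStream(codecs.BOM_UTF8 + b"<p>hi")
    assert stream.charEncoding[0] == 'utf-8'  # the BOM decides the encoding
    assert stream.char() == "<"               # char() yields decoded text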
mpl-2.0
turon/openthread
tools/harness-automation/cases/router_9_2_6.py
1
1878
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
#    names of its contributors may be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

import unittest

from autothreadharness.harness_case import HarnessCase


class Router_9_2_6(HarnessCase):
    role = HarnessCase.ROLE_ROUTER
    case = '9 2 6'
    golden_devices_required = 4

    def on_dialog(self, dialog, title):
        pass


if __name__ == '__main__':
    unittest.main()
bsd-3-clause
inspirehep/invenio
modules/bibindex/lib/bibindex_engine_tokenizer_unit_tests.py
5
20948
# -*- coding:utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""bibindex_engine_tokenizer_tests - unit tests for tokenizers

There should always be at least one test class for each class in b_e_t.
"""

from invenio.testutils import InvenioTestCase

from invenio.testutils import make_test_suite, run_test_suite
from invenio.bibindex_engine_utils import load_tokenizers


_TOKENIZERS = load_tokenizers()


class TestAuthorTokenizerScanning(InvenioTestCase):
    """Test BibIndex name tokenization"""

    def setUp(self):
        self.tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
        self.scan = self.tokenizer.scan_string_for_phrases

    def test_bifnt_scan_single(self):
        """BibIndexAuthorTokenizer - scanning single names like 'Dido'"""
        teststr = "Dido"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Dido'],
                       'nonlastnames': [],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_simple_western_forward(self):
        """BibIndexAuthorTokenizer - scanning simple Western-style: first last"""
        teststr = "Ringo Starr"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Starr'],
                       'nonlastnames': ['Ringo'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_simple_western_reverse(self):
        """BibIndexAuthorTokenizer - scanning simple Western-style: last, first"""
        teststr = "Starr, Ringo"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Starr'],
                       'nonlastnames': ['Ringo'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_multiname_forward(self):
        """BibIndexAuthorTokenizer - scanning multiword: first middle last"""
        teststr = "Michael Edward Peskin"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'],
                       'nonlastnames': ['Michael', 'Edward'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_multiname_dotcrammed(self):
        """BibIndexAuthorTokenizer - scanning multiword: f.m. last"""
        teststr = "M.E. Peskin"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'],
                       'nonlastnames': ['M', 'E'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_multiname_dotcrammed_reversed(self):
        """BibIndexAuthorTokenizer - scanning multiword: last, f.m."""
        teststr = "Peskin, M.E."
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'],
                       'nonlastnames': ['M', 'E'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_multiname_dashcrammed(self):
        """BibIndexAuthorTokenizer - scanning multiword: first-middle last"""
        teststr = "Jean-Luc Picard"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Picard'],
                       'nonlastnames': ['Jean', 'Luc'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_multiname_dashcrammed_reversed(self):
        """BibIndexAuthorTokenizer - scanning multiword: last, first-middle"""
        teststr = "Picard, Jean-Luc"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Picard'],
                       'nonlastnames': ['Jean', 'Luc'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_compound_lastname_dashes(self):
        """BibIndexAuthorTokenizer - scanning multiword: first middle last-last"""
        teststr = "Cantina Octavia Jones-Smith"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Jones', 'Smith'],
                       'nonlastnames': ['Cantina', 'Octavia'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_compound_lastname_dashes_reverse(self):
        """BibIndexAuthorTokenizer - scanning multiword: last-last, first middle"""
        teststr = "Jones-Smith, Cantina Octavia"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Jones', 'Smith'],
                       'nonlastnames': ['Cantina', 'Octavia'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_compound_lastname_reverse(self):
        """BibIndexAuthorTokenizer - scanning compound last: last last, first"""
        teststr = "Alvarez Gaume, Joachim"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Alvarez', 'Gaume'],
                       'nonlastnames': ['Joachim'],
                       'titles': [],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_titled(self):
        """BibIndexAuthorTokenizer - scanning title-bearing: last, first, title"""
        teststr = "Epstein, Brian, The Fifth Beatle"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Epstein'],
                       'nonlastnames': ['Brian'],
                       'titles': ['The Fifth Beatle'],
                       'raw': teststr}
        self.assertEqual(output, anticipated)

    def test_bifnt_scan_wildly_interesting(self):
        """BibIndexAuthorTokenizer - scanning last last last, first first, title, title"""
        teststr = "Ibanez y Gracia, Maria Luisa, II., ed."
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Ibanez', 'y', 'Gracia'],
                       'nonlastnames': ['Maria', 'Luisa'],
                       'titles': ['II.', 'ed.'],
                       'raw': teststr}
        self.assertEqual(output, anticipated)


class TestAuthorTokenizerTokens(InvenioTestCase):
    """Test BibIndex name variant token generation from scanned and tagged sets"""

    def setUp(self):
        self.tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
        self.get_index_tokens = self.tokenizer.parse_scanned_for_phrases

    def test_bifnt_tokenize_single(self):
        """BibIndexAuthorTokenizer - tokens for single-word name

        Ronaldo
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Ronaldo'],
                       'nonlastnames': [],
                       'titles': [],
                       'raw': 'Ronaldo'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['Ronaldo']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_simple_forward(self):
        """BibIndexAuthorTokenizer - tokens for first last

        Ringo Starr
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Starr'],
                       'nonlastnames': ['Ringo'],
                       'titles': [],
                       'raw': 'Ringo Starr'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_simple_reverse(self):
        """BibIndexAuthorTokenizer - tokens for last, first

        Starr, Ringo
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Starr'],
                       'nonlastnames': ['Ringo'],
                       'titles': [],
                       'raw': 'Starr, Ringo'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_twoname_forward(self):
        """BibIndexAuthorTokenizer - tokens for first middle last

        Michael Edward Peskin
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'],
                       'nonlastnames': ['Michael', 'Edward'],
                       'titles': [],
                       'raw': 'Michael Edward Peskin'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['E Peskin', 'Edward Peskin', 'M E Peskin', 'M Edward Peskin',
                       'M Peskin', 'Michael E Peskin', 'Michael Edward Peskin',
                       'Michael Peskin', 'Peskin, E', 'Peskin, Edward', 'Peskin, M',
                       'Peskin, M E', 'Peskin, M Edward', 'Peskin, Michael',
                       'Peskin, Michael E', 'Peskin, Michael Edward']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_compound_last(self):
        """BibIndexAuthorTokenizer - tokens for last last, first

        Alvarez Gaume, Joachim
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Alvarez', 'Gaume'],
                       'nonlastnames': ['Joachim'],
                       'titles': [],
                       'raw': 'Alvarez Gaume, Joachim'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['Alvarez Gaume, J', 'Alvarez Gaume, Joachim', 'Alvarez, J',
                       'Alvarez, Joachim', 'Gaume, J', 'Gaume, Joachim', 'J Alvarez',
                       'J Alvarez Gaume', 'J Gaume', 'Joachim Alvarez',
                       'Joachim Alvarez Gaume', 'Joachim Gaume']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_titled(self):
        """BibIndexAuthorTokenizer - tokens for last, first, title

        Epstein, Brian, The Fifth Beatle
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Epstein'],
                       'nonlastnames': ['Brian'],
                       'titles': ['The Fifth Beatle'],
                       'raw': 'Epstein, Brian, The Fifth Beatle'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['B Epstein', 'B Epstein, The Fifth Beatle', 'Brian Epstein',
                       'Brian Epstein, The Fifth Beatle', 'Epstein, B',
                       'Epstein, B, The Fifth Beatle', 'Epstein, Brian',
                       'Epstein, Brian, The Fifth Beatle']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_wildly_interesting(self):
        """BibIndexAuthorTokenizer - tokens for last last last, first first, title, title

        Ibanez y Gracia, Maria Luisa, II, (ed.)
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Ibanez', 'y', 'Gracia'],
                       'nonlastnames': ['Maria', 'Luisa'],
                       'titles': ['II', '(ed.)'],
                       'raw': 'Ibanez y Gracia, Maria Luisa, II, (ed.)'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['Gracia, L', 'Gracia, Luisa', 'Gracia, M', 'Gracia, M L',
                       'Gracia, M Luisa', 'Gracia, Maria', 'Gracia, Maria L',
                       'Gracia, Maria Luisa', 'Ibanez y Gracia, L',
                       'Ibanez y Gracia, L, II', 'Ibanez y Gracia, Luisa',
                       'Ibanez y Gracia, Luisa, II', 'Ibanez y Gracia, M',
                       'Ibanez y Gracia, M L', 'Ibanez y Gracia, M L, II',
                       'Ibanez y Gracia, M Luisa', 'Ibanez y Gracia, M Luisa, II',
                       'Ibanez y Gracia, M, II', 'Ibanez y Gracia, Maria',
                       'Ibanez y Gracia, Maria L', 'Ibanez y Gracia, Maria L, II',
                       'Ibanez y Gracia, Maria Luisa',
                       'Ibanez y Gracia, Maria Luisa, II', 'Ibanez y Gracia, Maria, II',
                       'Ibanez, L', 'Ibanez, Luisa', 'Ibanez, M', 'Ibanez, M L',
                       'Ibanez, M Luisa', 'Ibanez, Maria', 'Ibanez, Maria L',
                       'Ibanez, Maria Luisa', 'L Gracia', 'L Ibanez',
                       'L Ibanez y Gracia', 'L Ibanez y Gracia, II', 'Luisa Gracia',
                       'Luisa Ibanez', 'Luisa Ibanez y Gracia',
                       'Luisa Ibanez y Gracia, II', 'M Gracia', 'M Ibanez',
                       'M Ibanez y Gracia', 'M Ibanez y Gracia, II', 'M L Gracia',
                       'M L Ibanez', 'M L Ibanez y Gracia', 'M L Ibanez y Gracia, II',
                       'M Luisa Gracia', 'M Luisa Ibanez', 'M Luisa Ibanez y Gracia',
                       'M Luisa Ibanez y Gracia, II', 'Maria Gracia', 'Maria Ibanez',
                       'Maria Ibanez y Gracia', 'Maria Ibanez y Gracia, II',
                       'Maria L Gracia', 'Maria L Ibanez', 'Maria L Ibanez y Gracia',
                       'Maria L Ibanez y Gracia, II', 'Maria Luisa Gracia',
                       'Maria Luisa Ibanez', 'Maria Luisa Ibanez y Gracia',
                       'Maria Luisa Ibanez y Gracia, II']
        self.assertEqual(output, anticipated)

    def test_bifnt_tokenize_multimiddle_forward(self):
        """BibIndexAuthorTokenizer - tokens for first middle middle last

        W K H Panofsky
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Panofsky'],
                       'nonlastnames': ['W', 'K', 'H'],
                       'titles': [],
                       'raw': 'W K H Panofsky'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['H Panofsky', 'K H Panofsky', 'K Panofsky', 'Panofsky, H',
                       'Panofsky, K', 'Panofsky, K H', 'Panofsky, W', 'Panofsky, W H',
                       'Panofsky, W K', 'Panofsky, W K H', 'W H Panofsky',
                       'W K H Panofsky', 'W K Panofsky', 'W Panofsky']
        self.assertEqual(output, anticipated)

    def test_tokenize(self):
        """BibIndexAuthorTokenizer - check tokenize_for_phrases()

        Ringo Starr
        """
        teststr = "Ringo Starr"
        output = self.tokenizer.tokenize_for_phrases(teststr)
        anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
        self.assertEqual(output, anticipated)


class TestExactAuthorTokenizer(InvenioTestCase):
    """Test exact author name tokenizer."""

    def setUp(self):
        """setup"""
        self.tokenizer = _TOKENIZERS["BibIndexExactAuthorTokenizer"]()
        self.tokenize = self.tokenizer.tokenize_for_phrases

    def test_exact_author_name_tokenizer_bare(self):
        """BibIndexExactNameTokenizer - bare name"""
        self.assertEqual(self.tokenize('John Doe'), ['John Doe'])

    def test_exact_author_name_tokenizer_dots(self):
        """BibIndexExactNameTokenizer - name with dots"""
        self.assertEqual(self.tokenize('J. Doe'), ['J Doe'])
        self.assertEqual(self.tokenize('J.R. Doe'), ['J R Doe'])
        self.assertEqual(self.tokenize('J. R. Doe'), ['J R Doe'])

    def test_exact_author_name_tokenizer_trailing_dots(self):
        """BibIndexExactNameTokenizer - name with trailing dots"""
        self.assertEqual(self.tokenize('Doe, J'), ['Doe, J'])
        self.assertEqual(self.tokenize('Doe, J.'), ['Doe, J'])

    def test_exact_author_name_tokenizer_hyphens(self):
        """BibIndexExactNameTokenizer - name with hyphens"""
        self.assertEqual(self.tokenize('Doe, Jean-Pierre'), ['Doe, Jean Pierre'])


class TestCJKTokenizer(InvenioTestCase):
    """Tests for CJK Tokenizer which splits CJK words into characters and
    treats every single character as a word"""

    @classmethod
    def setUp(self):
        self.tokenizer = _TOKENIZERS["BibIndexCJKTokenizer"]()

    def test_tokenize_for_words_phrase_galaxy(self):
        """tokenizing phrase: galaxy s4据信"""
        phrase = "galaxy s4据信"
        result = self.tokenizer.tokenize_for_words(phrase)
        self.assertEqual(sorted(['galaxy', 's4', '据', '信']), sorted(result))

    def test_tokenize_for_words_phrase_with_special_punctuation(self):
        """tokenizing phrase: 马英九:台湾民"""
        phrase = u"马英九:台湾民"
        result = self.tokenizer.tokenize_for_words(phrase)
        self.assertEqual(sorted(['马', '英', '九', '台', '湾', '民']), sorted(result))

    def test_tokenize_for_words_phrase_with_special_punctuation_two(self):
        """tokenizing phrase: 色的“刀子嘴”"""
        phrase = u"色的“刀子嘴”"
        result = self.tokenizer.tokenize_for_words(phrase)
        self.assertEqual(sorted(['色', '的', '刀', '子', '嘴']), sorted(result))

    def test_tokenize_for_words_simple_phrase(self):
        """tokenizing phrase: 春眠暁覚"""
        self.assertEqual(sorted(self.tokenizer.tokenize_for_words(u'春眠暁覚')),
                         sorted(['春', '眠', '暁', '覚']))

    def test_tokenize_for_words_mixed_phrase(self):
        """tokenizing phrase: 春眠暁ABC覚"""
        self.assertEqual(sorted(self.tokenizer.tokenize_for_words(u'春眠暁ABC覚')),
                         sorted(['春', '眠', '暁', 'abc', '覚']))

    def test_tokenize_for_words_phrase_with_comma(self):
        """tokenizing phrase: 春眠暁, 暁"""
        phrase = u"春眠暁, 暁"
        self.assertEqual(sorted(self.tokenizer.tokenize_for_words(phrase)),
                         sorted(['春', '眠', '暁']))


class TestJournalPageTokenizer(InvenioTestCase):
    """Tests for JournalPage Tokenizer"""

    @classmethod
    def setUp(self):
        self.tokenizer = _TOKENIZERS["BibIndexJournalPageTokenizer"]()

    def test_tokenize_for_single_page(self):
        """tokenizing for single page"""
        test_pairs = [
            # simple number
            ('1', ['1']),
            ('23', ['23']),
            ('12312', ['12312']),
            # letter + number
            ('C85', ['C85']),
            ('L45', ['L45']),
            # roman numbers
            ('VII', ['VII']),
            ('X', ['X']),
            # prefix + simple number
            ('p.321', ['p.321', '321']),
            ('pp.321', ['pp.321', '321']),
            ('cpp.321', ['cpp.321', '321']),
            ('pag.321', ['pag.321', '321']),
            # prefix + non-simple page
            ('p.A45', ['p.A45', 'A45']),
            ('pp.C83', ['pp.C83', 'C83']),
            ('p.V', ['p.V', 'V']),
            ('pp.IV', ['pp.IV', 'IV']),
        ]

        for phrase, expected_tokens in test_pairs:
            result = self.tokenizer.tokenize(phrase)
            self.assertEqual(sorted(expected_tokens), sorted(result))

    def test_tokenize_for_page_range(self):
        """tokenizing for page range"""
        test_pairs = [
            # simple number
            ('1-12', ['1', '1-12']),
            ('22-22', ['22', '22-22']),
            ('95-12312', ['95', '95-12312']),
            # letter + number
            ('C85-D55', ['C85', 'C85-D55']),
            ('L45-L88', ['L45', 'L45-L88']),
            # roman numbers
            ('I-VII', ['I', 'I-VII']),
            ('VIII-X', ['VIII', 'VIII-X']),
            # mixed range
            ('III-12', ['III', 'III-12']),
            ('343-A10', ['343', '343-A10']),
            ('IX-B5', ['IX', 'IX-B5']),
            # prefix + simple number
            ('p.56-123', ['p.56-123', '56-123', '56']),
            ('pp.56-123', ['pp.56-123', '56-123', '56']),
            ('cpp.56-123', ['cpp.56-123', '56-123', '56']),
            ('pag.56-123', ['pag.56-123', '56-123', '56']),
            # prefix + non-simple page
            ('pp.VII-123', ['pp.VII-123', 'VII-123', 'VII']),
        ]

        for phrase, expected_tokens in test_pairs:
            result = self.tokenizer.tokenize(phrase)
            self.assertEqual(sorted(expected_tokens), sorted(result))


TEST_SUITE = make_test_suite(TestAuthorTokenizerScanning,
                             TestAuthorTokenizerTokens,
                             TestExactAuthorTokenizer,
                             TestCJKTokenizer,
                             TestJournalPageTokenizer)

if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
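
# Editor's note: a brief usage sketch (not part of the original Invenio test
# module) showing how the tokenizer under test is driven; the input mirrors
# the fixtures above.
def _demo_author_tokenizer():
    tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
    # Produces the four index variants asserted in test_tokenize above:
    # ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
    return tokenizer.tokenize_for_phrases("Ringo Starr")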
gpl-2.0
SpokesmanReview/django-boundaryservice
boundaryservice/management/commands/loadshapefiles.py
3
10135
import logging
log = logging.getLogger('boundaries.api.load_shapefiles')
from optparse import make_option
import os, os.path
import sys
from zipfile import ZipFile
from tempfile import mkdtemp

from django.conf import settings
from django.contrib.gis.gdal import (CoordTransform, DataSource, OGRGeometry,
                                     OGRGeomType)
from django.core.management.base import BaseCommand
from django.db import connections, DEFAULT_DB_ALIAS, transaction

from boundaryservice.models import BoundarySet, Boundary

DEFAULT_SHAPEFILES_DIR = getattr(settings, 'SHAPEFILES_DIR', 'data/shapefiles')
GEOMETRY_COLUMN = 'shape'


class Command(BaseCommand):
    help = 'Import boundaries described by shapefiles.'
    option_list = BaseCommand.option_list + (
        make_option('-c', '--clear', action='store_true', dest='clear',
                    help='Clear all jurisdictions in the DB.'),
        make_option('-d', '--data-dir', action='store', dest='data_dir',
                    default=DEFAULT_SHAPEFILES_DIR,
                    help='Load shapefiles from this directory'),
        make_option('-e', '--except', action='store', dest='except',
                    default=False,
                    help='Don\'t load these kinds of Areas, comma-delimited.'),
        make_option('-o', '--only', action='store', dest='only',
                    default=False,
                    help='Only load these kinds of Areas, comma-delimited.'),
        make_option('-u', '--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Specify a database to load shape data into.'),
    )

    def get_version(self):
        return '0.1'

    def handle(self, *args, **options):
        # Load configuration
        sys.path.append(options['data_dir'])
        from definitions import SHAPEFILES

        if options['only']:
            only = options['only'].upper().split(',')
            # TODO: stripping whitespace here because optparse doesn't handle
            # it correctly
            sources = [s for s in SHAPEFILES
                       if s.replace(' ', '').upper() in only]
        elif options['except']:
            exceptions = options['except'].upper().split(',')
            # See above
            sources = [s for s in SHAPEFILES
                       if s.replace(' ', '').upper() not in exceptions]
        else:
            sources = [s for s in SHAPEFILES]

        for kind, config in SHAPEFILES.items():
            if kind not in sources:
                log.info('Skipping %s.' % kind)
                continue

            log.info('Processing %s.' % kind)
            self.load_set(kind, config, options)

    @transaction.commit_on_success
    def load_set(self, kind, config, options):
        log.info('Processing %s.' % kind)

        if options['clear']:
            bset = None

            try:
                bset = BoundarySet.objects.get(name=kind)

                if bset:
                    log.info('Clearing old %s.' % kind)
                    bset.boundaries.all().delete()
                    bset.delete()
                    log.info('Loading new %s.' % kind)
            except BoundarySet.DoesNotExist:
                log.info('No existing boundary set of kind [%s] so nothing to '
                         'delete' % kind)

        path = os.path.join(options['data_dir'], config['file'])
        datasources = create_datasources(path)

        layer = datasources[0][0]

        # Create BoundarySet
        log.info("Creating BoundarySet: %s" % kind)
        bset = BoundarySet.objects.create(
            name=kind,
            singular=config['singular'],
            kind_first=config['kind_first'],
            authority=config['authority'],
            domain=config['domain'],
            last_updated=config['last_updated'],
            href=config['href'],
            notes=config['notes'],
            count=0,
            metadata_fields=layer.fields)
        log.info("Created with slug %s and id %s" % (bset.slug, bset.id))

        for datasource in datasources:
            log.info("Loading %s from %s" % (kind, datasource.name))
            # Assume only a single-layer in shapefile
            if datasource.layer_count > 1:
                log.warn('%s shapefile [%s] has multiple layers, using first.'
                         % (datasource.name, kind))
            layer = datasource[0]
            self.add_boundaries_for_layer(config, layer, bset,
                                          options['database'])

        # sync this with reality
        bset.count = Boundary.objects.filter(set=bset).count()
        bset.save()
        log.info('%s count: %i' % (kind, bset.count))

    def polygon_to_multipolygon(self, geom):
        """
        Convert polygons to multipolygons so all features are homogenous in
        the database.
        """
        if geom.__class__.__name__ == 'Polygon':
            g = OGRGeometry(OGRGeomType('MultiPolygon'))
            g.add(geom)
            return g
        elif geom.__class__.__name__ == 'MultiPolygon':
            return geom
        else:
            raise ValueError('Geom is neither Polygon nor MultiPolygon.')

    def add_boundaries_for_layer(self, config, layer, bset, database):
        # Get spatial reference system for the postgis geometry field
        geometry_field = Boundary._meta.get_field_by_name(GEOMETRY_COLUMN)[0]
        SpatialRefSys = connections[database].ops.spatial_ref_sys()
        db_srs = SpatialRefSys.objects.using(database).get(
            srid=geometry_field.srid).srs

        if 'srid' in config and config['srid']:
            layer_srs = SpatialRefSys.objects.get(srid=config['srid']).srs
        else:
            layer_srs = layer.srs

        # Simplification can be configured but default is to create simplified
        # geometry field by collapsing points within 1/1000th of a degree.
        # For reference, Chicago is at approx. 42 degrees latitude this works
        # out to a margin of roughly 80 meters latitude and 112 meters
        # longitude for Chicago area.
        simplification = config.get('simplification', 0.0001)

        # Create a convertor to turn the source data into
        transformer = CoordTransform(layer_srs, db_srs)

        for feature in layer:
            log.debug("Processing boundary %s" % feature)
            # Transform the geometry to the correct SRS
            geometry = self.polygon_to_multipolygon(feature.geom)
            geometry.transform(transformer)

            # Preserve topology prevents a shape from ever crossing over
            # itself.
            simple_geometry = geometry.geos.simplify(simplification,
                                                     preserve_topology=True)

            # Conversion may force multipolygons back to being polygons
            simple_geometry = self.polygon_to_multipolygon(simple_geometry.ogr)

            # Extract metadata into a dictionary
            metadata = {}

            for field in layer.fields:
                # Decode string fields using encoding specified in definitions
                # config
                if config['encoding'] != '':
                    try:
                        metadata[field] = feature.get(field).decode(
                            config['encoding'])
                    # Only strings will be decoded, get value in normal way if
                    # int etc.
                    except AttributeError:
                        metadata[field] = feature.get(field)
                else:
                    metadata[field] = feature.get(field)

            external_id = config['ider'](feature)
            feature_name = config['namer'](feature)

            # If encoding is specified, decode id and feature name
            if config['encoding'] != '':
                external_id = external_id.decode(config['encoding'])
                feature_name = feature_name.decode(config['encoding'])

            if config['kind_first']:
                display_name = '%s %s' % (config['singular'], feature_name)
            else:
                display_name = '%s %s' % (feature_name, config['singular'])

            Boundary.objects.create(
                set=bset,
                kind=config['singular'],
                external_id=external_id,
                name=feature_name,
                display_name=display_name,
                metadata=metadata,
                shape=geometry.wkt,
                simple_shape=simple_geometry.wkt,
                centroid=geometry.geos.centroid)


def create_datasources(path):
    if path.endswith('.zip'):
        path = temp_shapefile_from_zip(path)

    if path.endswith('.shp'):
        return [DataSource(path)]

    # assume it's a directory...
    sources = []

    for fn in os.listdir(path):
        fn = os.path.join(path, fn)
        if fn.endswith('.zip'):
            fn = temp_shapefile_from_zip(fn)
        if fn.endswith('.shp'):
            sources.append(DataSource(fn))

    return sources


def temp_shapefile_from_zip(zip_path):
    """
    Given a path to a ZIP file, unpack it into a temp dir and return the path
    to the shapefile that was in there.  Doesn't clean up after itself unless
    there was an error.

    If you want to cleanup later, you can derive the temp dir from this path.
    """
    log.info("Creating temporary SHP file from %s" % zip_path)
    zf = ZipFile(zip_path)
    tempdir = mkdtemp()
    shape_path = None

    # Copy the zipped files to a temporary directory, preserving names.
    for name in zf.namelist():
        data = zf.read(name)
        outfile = os.path.join(tempdir, name)

        if name.endswith('.shp'):
            shape_path = outfile

        f = open(outfile, 'w')
        f.write(data)
        f.close()

    if shape_path is None:
        log.warn("No shapefile, cleaning up")
        # Clean up after ourselves.
        for file in os.listdir(tempdir):
            os.unlink(os.path.join(tempdir, file))

        os.rmdir(tempdir)

        raise ValueError("No shapefile found in zip")

    return shape_path
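
# Editor's note: illustrative invocations of this management command (not part
# of the original module). The kind name "Counties" is a hypothetical example;
# the flags map directly to option_list above.
#
#   python manage.py loadshapefiles                           # load everything
#   python manage.py loadshapefiles -d data/shapefiles        # explicit data dir
#   python manage.py loadshapefiles --only Counties           # one kind only
#   python manage.py loadshapefiles --clear --only Counties   # wipe and reload a kind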
mit
pvt88/scrapy-cloud
cobweb/spiders/search_spider_tbds.py
2
2712
import scrapy
from datetime import datetime

from cobweb.items import PropertyItem
from cobweb.utilities import extract_number, extract_unit, extract_property_id, strip, extract_listing_type


class SearchSpiderTBDS(scrapy.Spider):
    name = 'search_spider_tbds'

    def __init__(self, vendor=None, crawl_url=None, type=None, max_depth=2,
                 start_index=1, *args, **kwargs):
        super(SearchSpiderTBDS, self).__init__(*args, **kwargs)
        self.vendor = vendor
        self.crawl_url = crawl_url
        self.index = int(start_index)
        self.type = type
        self.listing_type = extract_listing_type(self.crawl_url)
        self.max_depth = int(max_depth)
        self.start_urls = [self.vendor + self.crawl_url + "/p" + str(self.index)]

    def parse(self, response):
        if not isinstance(response, scrapy.http.response.html.HtmlResponse):
            response = scrapy.http.response.html.HtmlResponse(
                response.url, body=response.body)

        search_results = response.css(u'.col-gr-75per .group-prd li')
        for row in search_results:
            item = PropertyItem()
            item["vendor"] = self.vendor
            item["type"] = self.type
            item["listing_type"] = self.listing_type
            item["created_date"] = datetime.utcnow()
            item["last_indexed_date"] = datetime.utcnow()
            item["last_crawled_date"] = None

            subdomain = row.css(u'.content .title a::attr(href)').extract()
            if subdomain:
                item["link"] = self.vendor + subdomain[0].strip()
                item["property_id"] = extract_property_id(item["link"])

            info = row.css(u'.content .info span::text').extract()
            if len(info) > 0:
                price = info[0].strip()
                item["property_price_raw"] = price
                item["property_price"] = extract_number(price)
                item["property_price_unit"] = extract_unit(price)

            if len(info) > 1:
                property_size = info[1].strip()
                item["property_size_raw"] = property_size
                item["property_size"] = extract_number(property_size)
                item["property_size_unit"] = extract_unit(property_size)

            property_area = row.css(u'.content .fsize-13::text').extract()
            if len(property_area) > 1:
                item["property_area"] = property_area[1].strip()

            item["posted_date"] = None
            yield item

        if self.index < self.max_depth and len(search_results) > 0:
            self.index += 1
            next_url = self.vendor + self.crawl_url + "/p" + str(self.index)
            yield scrapy.Request(next_url, callback=self.parse)
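
# Editor's note: an illustrative crawl invocation (not part of the original
# module). Spider keyword arguments are supplied with scrapy's standard -a
# flag; the vendor URL and crawl path below are hypothetical placeholders.
#
#   scrapy crawl search_spider_tbds \
#       -a vendor=http://example-vendor.test \
#       -a crawl_url=/nha-dat-ban \
#       -a type=property -a max_depth=5 -a start_index=1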
gpl-3.0
celadevra/ParaJumper
parajumper/cli/new.py
1
3231
"""module to handle 'new' command.""" import tempfile import os import re from subprocess import call from clint.textui import prompt, puts, indent, colored import parajumper.item as item import parajumper.config as config import parajumper.db as db EDITOR = os.environ.get('EDITOR', 'vim') def dispatch(args): """Dispatcher for new command.""" if '-T' in args: tags = args.value_after('-T').split(',') else: tags = None if not args.flags.has(0): newitem() elif '-t' in args: newtodo(args.value_after('-t'), tags) elif '-e' in args: newevent(args.value_after('-e'), tags) elif '-n' in args: newnote(args.value_after('-n'), tags) def newitem(tags=None): """Create new item by calling default $EDITOR, read in user input, and parse content.""" conf = config.Config() bullets = conf.options['bullets'] puts("Please select a bullet for your note.") puts("Available bullets are:") for key in bullets: with indent(4): puts("%s : %s" % (key, bullets[key])) bullet = prompt.query("Your choice: ") initial_message = """<!-- Please enter your note below. You can use markdown --> <!-- lines starting with '&' and a space are interpreted as tags --> <!-- tags are separated by spaces, like this:--> <!-- & history roman hannibal expected_in_test -->""" notes = '' if tags is None: tags = [] tempf = tempfile.NamedTemporaryFile(suffix='.md', mode='w+', encoding='utf-8', delete=False) tempf.write(initial_message) tempf.flush() try: call([EDITOR, tempf.name]) except FileNotFoundError: call(['vi', tempf.name]) tempf.close() with open(tempf.name) as tempf: for line in tempf: if line[:4] != '<!--': if line[:2] != '& ': notes += line else: tags = tags + [x for x in line[2:-1].split(' ') if x != ''] os.remove(tempf.name) result = item.Item(bullet=bullet, content=re.sub('\n+$', '\n', notes), tags=tags) db.save_item(result) puts("New item saved with id = %s" % colored.green(result.identity)) def _find_bullet(what): """Find bullet char corresponding to string.""" conf = config.Config() bullets = conf.options['bullets'] return list(bullets.keys())[list(bullets.values()).index(what)] def newtodo(note, tags=None): """Quickly (non-interactively) create and store a new todo item.""" result = item.Item(bullet=_find_bullet('todo'), content=note, tags=tags) db.save_item(result) puts("New item saved with id = %s" % colored.green(result.identity)) def newevent(note, tags=None): """Quickly (non-interactively) create and store a new event item.""" result = item.Item(bullet=_find_bullet('event'), content=note, tags=tags) db.save_item(result) puts("New item saved with id = %s" % colored.green(result.identity)) def newnote(note, tags=None): """Quickly (non-interactively) create and store a new note item.""" result = item.Item(bullet=_find_bullet('notes'), content=note, tags=tags) db.save_item(result) puts("New item saved with id = %s" % colored.green(result.identity))
gpl-3.0
fharenheit/template-spark-app
src/main/python/ml/index_to_string_example.py
123
2014
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

# $example on$
from pyspark.ml.feature import IndexToString, StringIndexer
# $example off$
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("IndexToStringExample")\
        .getOrCreate()

    # $example on$
    df = spark.createDataFrame(
        [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
        ["id", "category"])

    indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
    model = indexer.fit(df)
    indexed = model.transform(df)

    print("Transformed string column '%s' to indexed column '%s'"
          % (indexer.getInputCol(), indexer.getOutputCol()))
    indexed.show()

    print("StringIndexer will store labels in output column metadata\n")

    converter = IndexToString(inputCol="categoryIndex",
                              outputCol="originalCategory")
    converted = converter.transform(indexed)

    print("Transformed indexed column '%s' back to original string column '%s' using "
          "labels in metadata" % (converter.getInputCol(), converter.getOutputCol()))
    converted.select("id", "categoryIndex", "originalCategory").show()
    # $example off$

    spark.stop()
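
# Editor's note (not part of the original example): StringIndexer assigns
# indices by descending label frequency, so for the data above "a" -> 0.0,
# "c" -> 1.0, "b" -> 2.0, and IndexToString recovers the original strings
# from the metadata. The final select() prints roughly:
#
#   +---+-------------+----------------+
#   | id|categoryIndex|originalCategory|
#   +---+-------------+----------------+
#   |  0|          0.0|               a|
#   |  1|          2.0|               b|
#   |  2|          1.0|               c|
#   |  3|          0.0|               a|
#   |  4|          0.0|               a|
#   |  5|          1.0|               c|
#   +---+-------------+----------------+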
apache-2.0
felix1m/pyspotify
spotify/user.py
3
2597
from __future__ import unicode_literals

import spotify
from spotify import ffi, lib, serialized, utils


__all__ = [
    'User',
]


class User(object):
    """A Spotify user.

    You can get users from the session, or you can create a :class:`User`
    yourself from a Spotify URI::

        >>> session = spotify.Session()
        # ...
        >>> user = session.get_user('spotify:user:jodal')
        >>> user.load().display_name
        u'jodal'
    """

    def __init__(self, session, uri=None, sp_user=None, add_ref=True):
        assert uri or sp_user, 'uri or sp_user is required'

        self._session = session

        if uri is not None:
            user = spotify.Link(self._session, uri=uri).as_user()
            if user is None:
                raise ValueError(
                    'Failed to get user from Spotify URI: %r' % uri)
            sp_user = user._sp_user
            add_ref = True

        if add_ref:
            lib.sp_user_add_ref(sp_user)
        self._sp_user = ffi.gc(sp_user, lib.sp_user_release)

    def __repr__(self):
        return 'User(%r)' % self.link.uri

    @property
    @serialized
    def canonical_name(self):
        """The user's canonical username."""
        return utils.to_unicode(lib.sp_user_canonical_name(self._sp_user))

    @property
    @serialized
    def display_name(self):
        """The user's displayable username."""
        return utils.to_unicode(lib.sp_user_display_name(self._sp_user))

    @property
    def is_loaded(self):
        """Whether the user's data is loaded yet."""
        return bool(lib.sp_user_is_loaded(self._sp_user))

    def load(self, timeout=None):
        """Block until the user's data is loaded.

        After ``timeout`` seconds with no results :exc:`~spotify.Timeout`
        is raised.

        If ``timeout`` is :class:`None` the default timeout is used.

        The method returns ``self`` to allow for chaining of calls.
        """
        return utils.load(self._session, self, timeout=timeout)

    @property
    def link(self):
        """A :class:`Link` to the user."""
        return spotify.Link(
            self._session,
            sp_link=lib.sp_link_create_from_user(self._sp_user),
            add_ref=False)

    @property
    def starred(self):
        """The :class:`Playlist` of tracks starred by the user."""
        return self._session.get_starred(self.canonical_name)

    @property
    def published_playlists(self):
        """The :class:`PlaylistContainer` of playlists published by the
        user."""
        return self._session.get_published_playlists(self.canonical_name)
apache-2.0
Becksteinlab/MDPOW
mdpow/version.py
1
1579
# POW package __init__.py
# Copyright (c) 2010 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.

"""\
MDPOW version information
=========================

MDPOW uses `semantic versioning`_ with the release number consisting
of a triplet *MAJOR.MINOR.PATCH*. *PATCH* releases are bug fixes or
updates to docs or meta data only and do not introduce new features
or change the API. Within a *MAJOR* release, the user API is stable
except during the development cycles with MAJOR = 0 where the API may
also change (rarely) between MINOR releases. *MINOR* releases can
introduce new functionality or deprecate old ones.

Development versions will have the suffix *-dev* after the version
string.

.. _semantic versioning: http://semver.org

Accessing release information
-----------------------------

User code should use :func:`get_version` or `get_version_tuple`.

.. autodata:: VERSION
.. autofunction:: get_version
.. autofunction:: get_version_tuple
"""

#: Package version; this is the only place where it is set.
VERSION = 0, 7, 0

#: Set to ``True`` for a release. If set to ``False`` then the patch level
#: will have the suffix "-dev".
RELEASE = False

if not RELEASE:
    VERSION = VERSION[:2] + (str(VERSION[2]) + '-dev',)

def get_version():
    """Return current package version as a string."""
    return ".".join(map(str, VERSION))

def get_version_tuple():
    """Return current package version as a tuple (*MAJOR*, *MINOR*, *PATCHLEVEL*)."""
    return tuple(map(str, VERSION))
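
# Editor's note: worked example (not part of the original module). With
# VERSION = (0, 7, 0) and RELEASE = False as set above, the patch level
# gains the "-dev" suffix before the accessors stringify it:
#
#   >>> get_version()
#   '0.7.0-dev'
#   >>> get_version_tuple()
#   ('0', '7', '0-dev')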
gpl-3.0
mortada/numpy
numpy/core/tests/test_umath_complex.py
70
19916
from __future__ import division, absolute_import, print_function

import sys
import platform

from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np

# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
# TODO: FPU exceptions

# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
with np.errstate(all='ignore'):
    functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
                            or (np.log(complex(np.NZERO, 0)).imag != np.pi))
# TODO: replace with a check on whether platform-provided C99 funcs are used
skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)

def platform_skip(func):
    return dec.skipif(skip_complex_tests,
        "Numpy is using complex functions (e.g. sqrt) provided by your"
        "platform's C library. However, they do not seem to behave according"
        "to C99 -- so C99 tests are skipped.")(func)


class TestCexp(object):
    def test_simple(self):
        check = check_complex_value
        f = np.exp

        yield check, f, 1, 0, np.exp(1), 0, False
        yield check, f, 0, 1, np.cos(1), np.sin(1), False

        ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
        yield check, f, 1, 1, ref.real, ref.imag, False

    @platform_skip
    def test_special_values(self):
        # C99: Section G 6.3.1
        check = check_complex_value
        f = np.exp

        # cexp(+-0 + 0i) is 1 + 0i
        yield check, f, np.PZERO, 0, 1, 0, False
        yield check, f, np.NZERO, 0, 1, 0, False

        # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
        # exception
        yield check, f, 1, np.inf, np.nan, np.nan
        yield check, f, -1, np.inf, np.nan, np.nan
        yield check, f, 0, np.inf, np.nan, np.nan

        # cexp(inf + 0i) is inf + 0i
        yield check, f, np.inf, 0, np.inf, 0

        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
        ref = np.complex(np.cos(1.), np.sin(1.))
        yield check, f, -np.inf, 1, np.PZERO, np.PZERO

        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
        yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO

        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
        ref = np.complex(np.cos(1.), np.sin(1.))
        yield check, f, np.inf, 1, np.inf, np.inf

        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
        yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf

        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
        def _check_ninf_inf(dummy):
            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
            with np.errstate(invalid='ignore'):
                z = f(np.array(np.complex(-np.inf, np.inf)))
                if z.real != 0 or z.imag != 0:
                    raise AssertionError(msgform % (z.real, z.imag))

        yield _check_ninf_inf, None

        # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
        def _check_inf_inf(dummy):
            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
            with np.errstate(invalid='ignore'):
                z = f(np.array(np.complex(np.inf, np.inf)))
                if not np.isinf(z.real) or not np.isnan(z.imag):
                    raise AssertionError(msgform % (z.real, z.imag))

        yield _check_inf_inf, None

        # cexp(-inf + nan i) is +-0 +- 0i
        def _check_ninf_nan(dummy):
            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
            with np.errstate(invalid='ignore'):
                z = f(np.array(np.complex(-np.inf, np.nan)))
                if z.real != 0 or z.imag != 0:
                    raise AssertionError(msgform % (z.real, z.imag))

        yield _check_ninf_nan, None

        # cexp(inf + nan i) is +-inf + nan
        def _check_inf_nan(dummy):
            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
            with np.errstate(invalid='ignore'):
                z = f(np.array(np.complex(np.inf, np.nan)))
                if not np.isinf(z.real) or not np.isnan(z.imag):
                    raise AssertionError(msgform % (z.real, z.imag))

        yield _check_inf_nan, None

        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
        # ex)
        yield check, f, np.nan, 1, np.nan, np.nan
        yield check, f, np.nan, -1, np.nan, np.nan

        yield check, f, np.nan, np.inf, np.nan, np.nan
        yield check, f, np.nan, -np.inf, np.nan, np.nan

        # cexp(nan + nani) is nan + nani
        yield check, f, np.nan, np.nan, np.nan, np.nan

    @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations")
    def test_special_values2(self):
        # XXX: most implementations get it wrong here (including glibc <= 2.10)
        # cexp(nan + 0i) is nan + 0i
        # (editor's note: the helper bindings below were missing in the
        # flattened dump; without them 'check' and 'f' are undefined names)
        check = check_complex_value
        f = np.exp

        yield check, f, np.nan, 0, np.nan, 0

class TestClog(TestCase):
    def test_simple(self):
        x = np.array([1+0j, 1+2j])
        y_r = np.log(np.abs(x)) + 1j * np.angle(x)
        y = np.log(x)
        for i in range(len(x)):
            assert_almost_equal(y[i], y_r[i])

    @platform_skip
    @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
    def test_special_values(self):
        xl = []
        yl = []

        # From C99 std (Sec 6.3.2)
        # XXX: check exceptions raised
        # --- raise for invalid fails.

        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
        # floating-point exception.
        with np.errstate(divide='raise'):
            x = np.array([np.NZERO], dtype=np.complex)
            y = np.complex(-np.inf, np.pi)
            self.assertRaises(FloatingPointError, np.log, x)
            with np.errstate(divide='ignore'):
                assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
        # floating-point exception.
        with np.errstate(divide='raise'):
            x = np.array([0], dtype=np.complex)
            y = np.complex(-np.inf, 0)
            self.assertRaises(FloatingPointError, np.log, x)
            with np.errstate(divide='ignore'):
                assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        # clog(x + i inf returns +inf + i pi /2, for finite x.
        x = np.array([complex(1, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.5 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-1, np.inf)], dtype=np.complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
        # 'invalid' floating- point exception, for finite x.
        with np.errstate(invalid='raise'):
            x = np.array([complex(1., np.nan)], dtype=np.complex)
            y = np.complex(np.nan, np.nan)
            #self.assertRaises(FloatingPointError, np.log, x)
            with np.errstate(invalid='ignore'):
                assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        with np.errstate(invalid='raise'):
            x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
            #self.assertRaises(FloatingPointError, np.log, x)
            with np.errstate(invalid='ignore'):
                assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
        x = np.array([-np.inf + 1j], dtype=np.complex)
        y = np.complex(np.inf, np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
        x = np.array([np.inf + 1j], dtype=np.complex)
        y = np.complex(np.inf, 0)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(- inf + i inf) returns +inf + i3pi /4.
        x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.75 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + i inf) returns +inf + ipi /4.
        x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.25 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+/- inf + iNaN) returns +inf + iNaN.
        x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
        y = np.complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iy) returns NaN + iNaN and optionally raises the
        # 'invalid' floating-point exception, for finite y.
        x = np.array([complex(np.nan, 1)], dtype=np.complex)
        y = np.complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + i inf) returns +inf + iNaN.
        x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iNaN) returns NaN + iNaN.
        x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
        y = np.complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(conj(z)) = conj(clog(z)).
        xa = np.array(xl, dtype=np.complex)
        ya = np.array(yl, dtype=np.complex)
        with np.errstate(divide='ignore'):
            for i in range(len(xa)):
                assert_almost_equal(np.log(np.conj(xa[i])),
                                    np.conj(np.log(xa[i])))

class TestCsqrt(object):

    def test_simple(self):
        # sqrt(1)
        yield check_complex_value, np.sqrt, 1, 0, 1, 0

        # sqrt(1i)
        yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False

        # sqrt(-1)
        yield check_complex_value, np.sqrt, -1, 0, 0, 1

    def test_simple_conjugate(self):
        ref = np.conj(np.sqrt(np.complex(1, 1)))

        def f(z):
            return np.sqrt(np.conj(z))

        yield check_complex_value, f, 1, 1, ref.real, ref.imag, False

    #def test_branch_cut(self):
    #    _check_branch_cut(f, -1, 0, 1, -1)

    @platform_skip
    def test_special_values(self):
        check = check_complex_value
        f = np.sqrt

        # C99: Sec G 6.4.2
        x, y = [], []

        # csqrt(+-0 + 0i) is 0 + 0i
        yield check, f, np.PZERO, 0, 0, 0
        yield check, f, np.NZERO, 0, 0, 0

        # csqrt(x + infi) is inf + infi for any x (including NaN)
        yield check, f, 1, np.inf, np.inf, np.inf
        yield check, f, -1, np.inf, np.inf, np.inf

        yield check, f, np.PZERO, np.inf, np.inf, np.inf
        yield check, f, np.NZERO, np.inf, np.inf, np.inf
        yield check, f, np.inf, np.inf, np.inf, np.inf
        yield check, f, -np.inf, np.inf, np.inf, np.inf
        yield check, f, -np.nan, np.inf, np.inf, np.inf

        # csqrt(x + nani) is nan + nani for any finite x
        yield check, f, 1, np.nan, np.nan, np.nan
        yield check, f, -1, np.nan, np.nan, np.nan
        yield check, f, 0, np.nan, np.nan, np.nan

        # csqrt(-inf + yi) is +0 + infi for any finite y > 0
        yield check, f, -np.inf, 1, np.PZERO, np.inf

        # csqrt(inf + yi) is +inf + 0i for any finite y > 0
        yield check, f, np.inf, 1, np.inf, np.PZERO

        # csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
        def _check_ninf_nan(dummy):
            msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
            z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
            #Fixme: ugly workaround for isinf bug.
            with np.errstate(invalid='ignore'):
                if not (np.isnan(z.real) and np.isinf(z.imag)):
                    raise AssertionError(msgform % (z.real, z.imag))

        yield _check_ninf_nan, None

        # csqrt(+inf + nani) is inf + nani
        yield check, f, np.inf, np.nan, np.inf, np.nan

        # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
        # + nani)
        yield check, f, np.nan, 0, np.nan, np.nan
        yield check, f, np.nan, 1, np.nan, np.nan
        yield check, f, np.nan, np.nan, np.nan, np.nan

        # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
        # cuts first)

class TestCpow(TestCase):
    def setUp(self):
        self.olderr = np.seterr(invalid='ignore')

    def tearDown(self):
        np.seterr(**self.olderr)

    def test_simple(self):
        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
        y_r = x ** 2
        y = np.power(x, 2)
        for i in range(len(x)):
            assert_almost_equal(y[i], y_r[i])

    def test_scalar(self):
        x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
        lx = list(range(len(x)))
        # Compute the values for complex type in python
        p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
        # Substitute a result allowed by C99 standard
        p_r[4] = complex(np.inf, np.nan)
        # Do the same with numpy complex scalars
        n_r = [x[i] ** y[i] for i in lx]
        for i in lx:
            assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)

    def test_array(self):
        x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
        lx = list(range(len(x)))
        # Compute the values for complex type in python
        p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
        # Substitute a result allowed by C99 standard
        p_r[4] = complex(np.inf, np.nan)
        # Do the same with numpy arrays
        n_r = x ** y
        for i in lx:
            assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)

class TestCabs(object):
    def setUp(self):
        self.olderr = np.seterr(invalid='ignore')

    def tearDown(self):
        np.seterr(**self.olderr)

    def test_simple(self):
        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
        y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
        y = np.abs(x)
        for i in range(len(x)):
            assert_almost_equal(y[i], y_r[i])

    def test_fabs(self):
        # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
        x = np.array([1+0j], dtype=np.complex)
        assert_array_equal(np.abs(x), np.real(x))

        x = np.array([complex(1, np.NZERO)], dtype=np.complex)
        assert_array_equal(np.abs(x), np.real(x))

        x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex)
        assert_array_equal(np.abs(x), np.real(x))

        x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex)
        assert_array_equal(np.abs(x), np.real(x))

    def test_cabs_inf_nan(self):
        x, y = [], []

        # cabs(+-nan + nani) returns nan
        x.append(np.nan)
        y.append(np.nan)
        yield check_real_value, np.abs, np.nan, np.nan, np.nan

        x.append(np.nan)
        y.append(-np.nan)
        yield check_real_value, np.abs, -np.nan, np.nan, np.nan

        # According to C99 standard, if exactly one of the real/part is inf and
        # the other nan, then cabs should return inf
        x.append(np.inf)
        y.append(np.nan)
        yield check_real_value, np.abs, np.inf, np.nan, np.inf

        x.append(-np.inf)
        y.append(np.nan)
        yield check_real_value, np.abs, -np.inf, np.nan, np.inf

        # cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
        def f(a):
            return np.abs(np.conj(a))

        def g(a, b):
            return np.abs(np.complex(a, b))

        xa = np.array(x, dtype=np.complex)
        for i in range(len(xa)):
            ref = g(x[i], y[i])
            yield check_real_value, f, x[i], y[i], ref

class TestCarg(object):
    def test_simple(self):
        check_real_value(ncu._arg, 1, 0, 0, False)
        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)

        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)

    @dec.knownfailureif(True,
        "Complex arithmetic with signed zero is buggy on most implementation")
    def test_zero(self):
        # carg(-0 +- 0i) returns +- pi
        yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False
        yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False

        # carg(+0 +- 0i) returns +- 0
        yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO
        yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO

        # carg(x +- 0i) returns +- 0 for x > 0
        yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False
        yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False

        # carg(x +- 0i) returns +- pi for x < 0
        yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False
        yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False

        # carg(+- 0 + yi) returns pi/2 for y > 0
        yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False
        yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False

        # carg(+- 0 + yi) returns -pi/2 for y < 0
        yield check_real_value, ncu._arg, np.PZERO, -1, 0.5 * np.pi, False
        yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False

    #def test_branch_cuts(self):
    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)

    def test_special_values(self):
        # carg(-np.inf +- yi) returns +-pi for finite y > 0
        yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False
        yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False

        # carg(np.inf +- yi) returns +-0 for finite y > 0
        yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False
        yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False

        # carg(x +- np.infi) returns +-pi/2 for finite x
        yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False
        yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False

        # carg(-np.inf +- np.infi) returns +-3pi/4
        yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False
        yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False

        # carg(np.inf +- np.infi) returns +-pi/4
        yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False
        yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False

        # carg(x + yi) returns np.nan if x or y is nan
        yield check_real_value, ncu._arg, np.nan, 0, np.nan, False
        yield check_real_value, ncu._arg, 0, np.nan, np.nan, False

        yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False
        yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False

def check_real_value(f, x1, y1, x, exact=True):
    z1 = np.array([complex(x1, y1)])
    if exact:
        assert_equal(f(z1), x)
    else:
        assert_almost_equal(f(z1), x)

def check_complex_value(f, x1, y1, x2, y2, exact=True):
    z1 = np.array([complex(x1, y1)])
    z2 = np.complex(x2, y2)
    with np.errstate(invalid='ignore'):
        if exact:
            assert_equal(f(z1), z2)
        else:
            assert_almost_equal(f(z1), z2)

if __name__ == "__main__":
    run_module_suite()
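
# Editor's note: a small usage sketch of the helpers above (not part of the
# original test module). check_complex_value asserts f(x1 + y1*1j) equals
# x2 + y2*1j; check_real_value asserts f(x1 + y1*1j) equals the real value x.
def _demo_check_helpers():
    check_complex_value(np.sqrt, -1, 0, 0, 1)  # C99 rule: csqrt(-1 + 0i) = 0 + 1i
    check_real_value(np.abs, 3, 4, 5)          # |3 + 4i| == 5, exactly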
bsd-3-clause
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/tools/real_world_impact/nsfw_urls.py
113
2143
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""NSFW urls in the Alexa top 2000 sites."""

nsfw_urls = set([
    "http://xhamster.com/",
    "http://xvideos.com/",
    "http://livejasmin.com/",
    "http://pornhub.com/",
    "http://redtube.com/",
    "http://youporn.com/",
    "http://xnxx.com/",
    "http://tube8.com/",
    "http://youjizz.com/",
    "http://adultfriendfinder.com/",
    "http://hardsextube.com/",
    "http://yourlust.com/",
    "http://drtuber.com/",
    "http://beeg.com/",
    "http://largeporntube.com/",
    "http://nuvid.com/",
    "http://bravotube.net/",
    "http://spankwire.com/",
    "http://discreethearts.com/",
    "http://keezmovies.com/",
    "http://xtube.com/",
    "http://alphaporno.com/",
    "http://4tube.com/",
    "http://nudevista.com/",
    "http://porntube.com/",
    "http://xhamstercams.com/",
    "http://porn.com/",
    "http://video-one.com/",
    "http://perfectgirls.net/",
    "http://slutload.com/",
    "http://sunporno.com/",
    "http://tnaflix.com/",
    "http://pornerbros.com/",
    "http://h2porn.com/",
    "http://adult-empire.com/",
    "http://pornhublive.com/",
    "http://sexitnow.com/",
    "http://pornsharia.com/",
    "http://freeones.com/",
    "http://tubegalore.com/",
    "http://xvideos.jp/",
    "http://brazzers.com/",
    "http://fapdu.com/",
    "http://pornoxo.com/",
    "http://extremetube.com/",
    "http://hot-sex-tube.com/",
    "http://xhamsterhq.com/",
    "http://18andabused.com/",
    "http://tubepleasure.com/",
    "http://18schoolgirlz.com/",
    "http://chaturbate.com/",
    "http://motherless.com/",
    "http://yobt.com/",
    "http://empflix.com/",
    "http://hellporno.com/",
    "http://ashemaletube.com/",
    "http://watchmygf.com/",
    "http://redtubelive.com/",
    "http://met-art.com/",
    "http://gonzoxxxmovies.com/",
    "http://shufuni.com/",
    "http://vid2c.com/",
    "http://dojki.com/",
    "http://cerdas.com/",
    "http://overthumbs.com/",
    "http://xvideoslive.com/",
    "http://playboy.com/",
    "http://caribbeancom.com/",
    "http://tubewolf.com/",
    "http://xmatch.com/",
    "http://ixxx.com/",
    "http://nymphdate.com/",
])
mit
RUNDSP/luigi-swf
luigi_swf/examples/task_basic.py
1
1677
#!/usr/bin/env python import datetime import logging import os.path from subprocess import call import luigi from luigi_swf import cw, LuigiSwfExecutor logger = logging.getLogger(__name__) seconds = 1. minutes = 60. * seconds hours = 60. * minutes class DemoBasicTask(luigi.Task): # Workaround for when the task is in the same file you're executing __module__ = 'luigi_swf.examples.task_basic' dt = luigi.DateParameter() hour = luigi.IntParameter() # Default values swf_task_list = 'default' swf_retries = 0 swf_start_to_close_timeout = None # in seconds swf_heartbeat_timeout = None # in seconds # Use luigi_swf.cw.cw_update_workflows() to sync these to CloudWatch. swf_cw_alarms = [ cw.TaskFailedAlarm(['arn:aws:sns:us-east-1:1234567:alert_ops']), cw.TaskFailedAlarm(['arn:aws:sns:us-east-1:1234567:alert_ops']), cw.TaskHasNotCompletedAlarm( ['arn:aws:sns:us-east-1:1234567:alert_ops'], period=2.5 * hours), ] def output(self): path = os.path.expanduser('~/luigi-swf-demo-basic-complete') return luigi.LocalTarget(path) def run(self): logger.info('hi | %s', self.dt) call(['touch', self.output().path]) class DemoBasicWorkflow(luigi.WrapperTask): dt = luigi.DateParameter() hour = luigi.IntParameter() def requires(self): return DemoBasicTask(dt=self.dt, hour=self.hour) if __name__ == '__main__': task = DemoBasicWorkflow(dt=datetime.datetime(2000, 1, 1), hour=0) domain = 'development' version = 'unspecified' ex = LuigiSwfExecutor(domain, version, task) ex.register() ex.execute()
apache-2.0
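The SWF knobs on DemoBasicTask are plain class attributes, so a subclass can tighten them per task; a hypothetical variant (class name and values illustrative) reusing the time constants defined in the example:

class DemoSlowTask(DemoBasicTask):
    # Allow two retries, give each attempt an hour, and heartbeat every
    # five minutes (values in seconds, as the comments above note).
    swf_retries = 2
    swf_start_to_close_timeout = int(1 * hours)
    swf_heartbeat_timeout = int(5 * minutes)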
zero-rp/miniblink49
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
34
7369
# Copyright (C) 2012 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import errno import logging import re from webkitpy.layout_tests.models import test_expectations _log = logging.getLogger(__name__) class LayoutTestFinder(object): def __init__(self, port, options): self._port = port self._options = options self._filesystem = self._port.host.filesystem self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' def find_tests(self, options, args): paths = self._strip_test_dir_prefixes(args) if options.test_list: paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR)) test_files = self._port.tests(paths) return (paths, test_files) def _strip_test_dir_prefixes(self, paths): return [self._strip_test_dir_prefix(path) for path in paths if path] def _strip_test_dir_prefix(self, path): # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if # the filesystem uses '\\' as a directory separator. 
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR): return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):] if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep): return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):] return path def _read_test_names_from_file(self, filenames, test_path_separator): fs = self._filesystem tests = [] for filename in filenames: try: if test_path_separator != fs.sep: filename = filename.replace(test_path_separator, fs.sep) file_contents = fs.read_text_file(filename).split('\n') for line in file_contents: line = self._strip_comments(line) if line: tests.append(line) except IOError, e: if e.errno == errno.ENOENT: _log.critical('') _log.critical('--test-list file "%s" not found' % filename) raise return tests @staticmethod def _strip_comments(line): commentIndex = line.find('//') if commentIndex == -1: commentIndex = len(line) line = re.sub(r'\s+', ' ', line[:commentIndex].strip()) if line == '': return None else: return line def skip_tests(self, paths, all_tests_list, expectations, http_tests): all_tests = set(all_tests_list) tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP) if self._options.skip_failing_tests: tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL)) tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY)) if self._options.skipped == 'only': tests_to_skip = all_tests - tests_to_skip elif self._options.skipped == 'ignore': tests_to_skip = set() elif self._options.skipped != 'always': # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'. tests_to_skip -= set(paths) return tests_to_skip def split_into_chunks(self, test_names): """Split into a list to run and a set to skip, based on --run-chunk and --run-part.""" if not self._options.run_chunk and not self._options.run_part: return test_names, set() # If the user specifies they just want to run a subset of the tests, # just grab a subset of the non-skipped tests. chunk_value = self._options.run_chunk or self._options.run_part try: (chunk_num, chunk_len) = chunk_value.split(":") chunk_num = int(chunk_num) assert(chunk_num >= 0) test_size = int(chunk_len) assert(test_size > 0) except (ValueError, AssertionError): _log.critical("invalid chunk '%s'" % chunk_value) return (None, None) # Get the number of tests num_tests = len(test_names) # Get the start offset of the slice. if self._options.run_chunk: chunk_len = test_size # In this case chunk_num can be really large. We need # to make the slave fit in the current number of tests. slice_start = (chunk_num * chunk_len) % num_tests else: # Validate the data. assert(test_size <= num_tests) assert(chunk_num <= test_size) # To count the chunk_len, and make sure we don't skip # some tests, we round to the next value that fits exactly # all the parts. rounded_tests = num_tests if rounded_tests % test_size != 0: rounded_tests = (num_tests + test_size - (num_tests % test_size)) chunk_len = rounded_tests / test_size slice_start = chunk_len * (chunk_num - 1) # It does not matter if we go over test_size. # Get the end offset of the slice. slice_end = min(num_tests, slice_start + chunk_len) tests_to_run = test_names[slice_start:slice_end] _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start))) # If we reached the end and we don't have enough tests, we run some # from the beginning.
if slice_end - slice_start < chunk_len: extra = chunk_len - (slice_end - slice_start) _log.debug(' last chunk is partial, appending [0:%d]' % extra) tests_to_run.extend(test_names[0:extra]) return (tests_to_run, set(test_names) - set(tests_to_run))
apache-2.0
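To make the --run-part arithmetic in split_into_chunks concrete, here is a standalone worked example mirroring the rounding logic above (the file runs under Python 2, where / on ints already floors; // is used here so the sketch is version-agnostic):

num_tests = 10
chunk_num, test_size = 2, 3                                      # "--run-part 2:3"
rounded_tests = num_tests + test_size - (num_tests % test_size)  # 12
chunk_len = rounded_tests // test_size                           # 4
slice_start = chunk_len * (chunk_num - 1)                        # 4
slice_end = min(num_tests, slice_start + chunk_len)              # 8
# Part 2 of 3 therefore runs test_names[4:8]. Part 3 gets [8:10], which is
# partial (2 < 4), so the wrap-around branch above appends test_names[0:2].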
matthiasdiener/spack
lib/spack/spack/test/cmd/list.py
3
2522
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import pytest from spack.main import SpackCommand list = SpackCommand('list') def test_list(): output = list() assert 'cloverleaf3d' in output assert 'hdf5' in output def test_list_filter(): output = list('py-*') assert 'py-numpy' in output assert 'perl-file-copy-recursive' not in output output = list('py-') assert 'py-numpy' in output assert 'perl-file-copy-recursive' in output @pytest.mark.maybeslow def test_list_search_description(): output = list('--search-description', 'xml') assert 'expat' in output def test_list_tags(): output = list('--tags', 'proxy-app') assert 'cloverleaf3d' in output assert 'hdf5' not in output def test_list_format_name_only(): output = list('--format', 'name_only') assert 'cloverleaf3d' in output assert 'hdf5' in output @pytest.mark.maybeslow def test_list_format_rst(): output = list('--format', 'rst') assert '.. _cloverleaf3d:' in output assert '.. _hdf5:' in output @pytest.mark.maybeslow def test_list_format_html(): output = list('--format', 'html') assert '<div class="section" id="cloverleaf3d">' in output assert '<h1>cloverleaf3d' in output assert '<div class="section" id="hdf5">' in output assert '<h1>hdf5' in output
lgpl-2.1
Mystic-Mirage/android_kernel_gigabyte_roma_r2_plus
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n" for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
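The autodict imported from perf's Util module above builds nested mappings on first access, which is what lets syscalls[comm][pid][id][ret] be populated without any setup; a minimal stand-in (an equivalent sketch, not perf's actual implementation):

from collections import defaultdict

def autodict():
    # Each missing key yields another autodict, arbitrarily deep.
    return defaultdict(autodict)

syscalls = autodict()
syscalls['bash'][1234][2][-2] = 1   # comm -> pid -> syscall id -> errno
# A first "+= 1" on a missing leaf raises TypeError (defaultdict + int),
# which the try/except in the script above converts into an assignment.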
dmlc/tvm
python/tvm/topi/arm_cpu/depthwise_conv2d.py
1
24416
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name,unused-variable """Depthwise convolution schedule for ARM CPU""" import tvm from tvm import te from tvm import autotvm from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity from .. import nn from ..utils import traverse_inline, get_const_tuple, get_const_int from ..nn.utils import get_pad_tuple from .tensor_intrin import smlal_int16_int32 from .arm_utils import is_aarch64_arm @autotvm.register_topi_compute("depthwise_conv2d_nchw.arm_cpu") def depthwise_conv2d_nchw(_, data, kernel, strides, padding, dilation, out_dtype): """Compute depthwise_conv2d with NCHW layout""" return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype) @autotvm.register_topi_schedule("depthwise_conv2d_nchw.arm_cpu") def schedule_depthwise_conv2d_nchw(cfg, outs): """Schedule depthwise conv2d Parameters ---------- cfg: ConfigEntity The configuration of this template outs: Array of Tensor The computation graph description of depthwise convolution2d in the format of an array of tensors. Returns ------- s: Schedule The computation schedule for depthwise_conv2d nchw. 
""" outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs s = te.create_schedule([x.op for x in outs]) def _schedule(cfg, s, data, data_pad, kernel, output): A, B, C = data, kernel, output s[data_pad].compute_inline() ##### space definition begin ##### n, c, h, w = s[output].op.axis _, vc = cfg.define_split("tile_c", c, num_outputs=2) _, vh = cfg.define_split("tile_h", h, num_outputs=2) _, vw = cfg.define_split("tile_w", w, num_outputs=2) cfg.define_annotate("ann", [vh, vw, vc], policy="try_unroll_vec") # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( "arm_cpu", "rk3399", "depthwise_conv2d_nchw.arm_cpu" ) cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### # park data to vector form [n, c, h, w] -> [n, C, h, w, VC] A0 = s.cache_read(data_pad, "global", C) n, c, h, w = s[A0].op.axis c, vc = cfg["tile_c"].apply(s, A0, c) s[A0].reorder(n, c, h, w, vc) A1 = s.cache_write(A0, "global") s[A0].compute_inline() # park kernel to vector form [co, ci, kh, kw] -> [CO, ci, kh, kw, VC] B0 = s.cache_read(B, "global", C) c, m, h, w = s[B0].op.axis c, vc, = cfg[ "tile_c" ].apply(s, B0, c) s[B0].reorder(c, m, h, w, vc) B1 = s.cache_write(B0, "global") s[B0].compute_inline() n, c, h, w = s[C].op.axis c, vc, = cfg[ "tile_c" ].apply(s, C, c) s[C].reorder(n, c, h, w, vc) # depthwise conv C0 = s.cache_write(C, "global") _, c, h, w, vc = s[C0].op.axis dh, dw = s[C0].op.reduce_axis oh, ih = cfg["tile_h"].apply(s, C0, h) ow, iw = cfg["tile_w"].apply(s, C0, w) s[C0].reorder(c, oh, ow, dh, dw, ih, iw, vc) s[A1].compute_at(s[C0], oh) # try unroll and vectorization cfg["ann"].apply( s, C0, [ih, iw, vc], axis_lens=[cfg["tile_h"].size[-1], cfg["tile_w"].size[-1], cfg["tile_c"].size[-1]], max_unroll=16, cfg=cfg, ) # fusion if C.op not in s.outputs: s[C].compute_inline() # mark parallel last = outs[0] n, c, h, w = s[last].op.axis s[last].parallel(c) n, c, h, w, vc = s[C0].op.axis s[C0].parallel(c) c, m, h, w, vc = s[B1].op.axis s[B1].parallel(c) return s def _callback(op): if op.tag == "depthwise_conv2d_nchw": output = op.output(0) kernel = op.input_tensors[1] data = op.input_tensors[0] data_pad = None if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag: data_pad = data data = data_pad.op.input_tensors[0] _schedule(cfg, s, data, data_pad, kernel, output) traverse_inline(s, outs[0].op, _callback) return s # TODO: # This schedule has incorrect result on some hardware platforms (like NV Jetson TX2) # Let us comment it out but not remove. # see discussion: # https://discuss.tvm.apache.org/t/autotuner-incorrect-result-after-tuning-mobilenetv2-on-arm-cpu/6088 @autotvm.register_topi_compute("depthwise_conv2d_nchw_spatial_pack.arm_cpu") def depthwise_conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype): """TOPI compute callback for depthwise_conv2d nchw Parameters ---------- cfg: ConfigEntity The config for this template data : tvm.te.Tensor 4-D with shape [batch, in_channel, in_height, in_width] kernel : tvm.te.Tensor 4-D with shape [num_filter, multiplier, filter_height, filter_width] or pre-packed 5-D with shape [num_filter_chunk, multiplier, filter_height, filter_width, num_filter_block] strides : list of two ints [stride_height, stride_width] padding : list of two ints [pad_height, pad_width] dilation : list of two ints [dilation_height, dilation_width] out_dtype: str The output type. This is used for mixed precision. 
Returns ------- output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ return _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2) @autotvm.register_topi_compute("depthwise_conv2d_nhwc.arm_cpu") def compute_depthwise_conv2d_nhwc(_, data, kernel, strides, padding, dilation, out_dtype): """TOPI compute callback for depthwise_conv2d nhwc Parameters ---------- cfg: ConfigEntity The config for this template data : tvm.te.Tensor 4-D with shape [batch, in_height, in_width, in_channel] kernel : tvm.te.Tensor 4-D with shape [filter_height, filter_width, in_channel, channel_multiplier] strides : list of two ints [stride_height, stride_width] padding : list of two ints [pad_height, pad_width] dilation : list of two ints [dilation_height, dilation_width] out_dtype: str The output type. This is used for mixed precision. Returns ------- output : tvm.te.Tensor 4-D with shape [batch, out_height, out_width, out_channel] """ out_dtype = out_dtype or data.dtype N, IH, IW, IC = get_const_tuple(data.shape) if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = dilation KH, KW, IC, channel_multiplier = get_const_tuple(kernel.shape) dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1 OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1 if pad_top or pad_left or pad_down or pad_right: data_pad = nn.pad( data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad" ) else: data_pad = data output_shape = (N, OH, OW, IC * channel_multiplier) idxdiv = tvm.tir.indexdiv idxmod = tvm.tir.indexmod reduce_h = te.reduce_axis((0, KH), name="reduce_h") reduce_w = te.reduce_axis((0, KW), name="reduce_w") out = te.compute( output_shape, lambda n, h, w, c: te.sum( data_pad[ n, HSTR * h + dilation_h * reduce_h, w * WSTR + reduce_w * dilation_w, idxdiv(c, channel_multiplier), ].astype(out_dtype) * kernel[ reduce_h, reduce_w, idxdiv(c, channel_multiplier), idxmod(c, channel_multiplier) ].astype(out_dtype), axis=[reduce_h, reduce_w], ), name="depthwise_conv2d_nhwc_output", ) return out @autotvm.register_topi_schedule("depthwise_conv2d_nhwc.arm_cpu") def schedule_depthwise_conv2d_nhwc(cfg, outs): """Create the schedule for depthwise_conv2d_nchw_spatial_pack""" outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs s = te.create_schedule([x.op for x in outs]) out = outs[0] ##### space definition begin ##### n, h, w, c = s[out].op.axis # Split the number of input/output channels cfg.define_split("tile_c", c, num_outputs=2) # Split the height of the convolution _, hi = cfg.define_split("tile_h", h, num_outputs=2) # Split the width of the convolution _, wi = cfg.define_split("tile_w", w, num_outputs=2) # Additional out (e.g., requantization, bias addition, etc..) 
# 0: locate the output on the second last axis of the main computation # 1: locate the output closest to the main computation cfg.define_knob("locate_output", [0, 1]) # Determine if we should unroll the computation of the inner tile cfg.define_knob("unroll_tile", [True, False]) # fallback support if cfg.is_fallback: cfg["tile_c"] = SplitEntity([-1, 8]) cfg["tile_h"] = SplitEntity([-1, 2]) cfg["tile_w"] = SplitEntity([-1, 2]) cfg["locate_output"] = OtherOptionEntity(1) cfg["unroll_tile"] = OtherOptionEntity(True) ##### space definition end ##### def schedule_conv(conv): conv_data = conv.op.input_tensors[0] kernel_data = conv.op.input_tensors[1] in_type = conv_data.dtype _, _, IC, channel_multiplier = get_const_tuple(kernel_data.shape) n, w, h, c = conv.op.axis r_h, r_w = conv.op.reduce_axis ho, hi = cfg["tile_h"].apply(s, conv, h) wo, wi = cfg["tile_w"].apply(s, conv, w) co, ci = cfg["tile_c"].apply(s, conv, c) split_val = cfg["tile_c"].size[-1] use_tensorization = ( (in_type == "int16") and (split_val == 8) and (IC % split_val == 0) and (channel_multiplier == 1) and is_aarch64_arm() ) data_pad_value = -1 if conv_data.name == "data_pad": assert isinstance(conv_data.op, tvm.te.ComputeOp) # Define a strategy for padding computation cfg.define_knob("data_pad_strategy", [1, 2, 3]) if cfg.is_fallback: # We cannot inline padding when tensorizing. # So, if we can tensorize, let's compute_at the closest axis cfg["data_pad_strategy"] = ( OtherOptionEntity(2) if use_tensorization else OtherOptionEntity(3) ) # Compute padding on the third to last axis of the computation if cfg["data_pad_strategy"].val == 1: s[conv_data].vectorize(list(s[conv_data].op.axis)[-1]) s[conv_data].compute_at(s[conv], ho) # Compute padding on the second to last axis of the computation if cfg["data_pad_strategy"].val == 2: s[conv_data].vectorize(list(s[conv_data].op.axis)[-1]) s[conv_data].compute_at(s[conv], wo) # Inline padding during computation if cfg["data_pad_strategy"].val == 3: s[conv_data].compute_inline() data_pad_value = cfg["data_pad_strategy"].val if use_tensorization and data_pad_value != 3: smlal = smlal_int16_int32() s[conv].tensorize(ci, smlal) else: s[conv].vectorize(ci) if cfg["unroll_tile"].val: s[conv].unroll(r_h) s[conv].unroll(r_w) s[conv].unroll(wi) s[conv].unroll(hi) s[conv].reorder(n, ho, wo, co, hi, wi, r_h, r_w, ci) fused_n_ho = s[conv].fuse(n, ho) return fused_n_ho def schedule_conv_out(out): n, h, w, c = out.op.axis co, ci = cfg["tile_c"].apply(s, out, c) wo, wi = cfg["tile_w"].apply(s, out, w) ho, hi = cfg["tile_h"].apply(s, out, h) s[out].reorder(n, ho, wo, co, hi, wi, ci) if cfg["unroll_tile"].val: s[out].unroll(wi) s[out].unroll(hi) if out.dtype in ["int8", "uint8"]: # In case of quantized convolution further split the channel in batches of 4 elements # so that we can use arm intrinsics to run fixed_point_multiplication ci_outer, ci_inner = s[out].split(ci, 4) s[out].vectorize(ci_inner) s[out].unroll(ci_outer) fused_n_ho = s[out].fuse(n, ho) return hi, wi, fused_n_ho def _callback(op): if op.name == "depthwise_conv2d_nhwc_output": conv = op.output(0) if conv != out: hi, wi, p_axis = schedule_conv_out(out) schedule_conv(conv) if cfg["locate_output"].val == 0: s[conv].compute_at(s[out], hi) if cfg["locate_output"].val == 1: s[conv].compute_at(s[out], wi) else: p_axis = schedule_conv(out) s[out].parallel(p_axis) traverse_inline(s, outs[0].op, _callback) return s @autotvm.register_topi_schedule("depthwise_conv2d_nchw_spatial_pack.arm_cpu") def schedule_depthwise_conv2d_nchw_spatial_pack(cfg, outs):
"""Create the schedule for depthwise_conv2d_nchw_spatial_pack""" outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs s = te.create_schedule([x.op for x in outs]) def _callback(op): if op.tag == "spatial_depthwise_conv2d_nchw_output": output = op.output(0) conv = op.input_tensors[0] data_vec = conv.op.input_tensors[0] kernel_vec = conv.op.input_tensors[1] if kernel_vec.op.name == "kernel_vec": kernel = kernel_vec.op.input_tensors[0] else: kernel = kernel_vec if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag: s[kernel].compute_inline() _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0]) traverse_inline(s, outs[0].op, _callback) return s def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile): out_dtype = out_dtype or data.dtype N, C, IH, IW = get_const_tuple(data.shape) if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = dilation if len(kernel.shape) == 4: pre_packed = False C, M, KH, KW = get_const_tuple(kernel.shape) else: # kernel tensor is pre packed pre_packed = True C, M, KH, KW, VC = get_const_tuple(kernel.shape) C = C * VC dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1 OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1 # pack data HPAD = pad_top + pad_down WPAD = pad_left + pad_right DOPAD = HPAD != 0 or WPAD != 0 if DOPAD: data_pad = nn.pad( data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), name="data_pad" ) else: data_pad = data # fallback support # Currently, Mali schedule doesn't use it like conv2d. if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log( "arm_cpu", "rk3399", "depthwise_conv2d_nchw_spatial_pack.arm_cpu" ) cfg.fallback_with_reference_log(ref_log) # ==================== define configuration space ==================== n, c, oh, ow = cfg.axis(N), cfg.axis(C), cfg.axis(OH), cfg.axis(OW) kh, kw = cfg.reduce_axis(KH), cfg.reduce_axis(KW) # Currently, Mali schedule doesn't use it like conv2d. 
# Leave num_tile for possible future use of Mali schedule if num_tile == 2: # for arm cpu co, vc = cfg.define_split("tile_co", c, num_outputs=2) oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2) ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2) else: raise RuntimeError("Invalid num_tile") cfg.define_reorder( "reorder_0", [n, co, oh, ow, kh, kw, vh, vw, vc], policy="candidate", candidate=[[n, co, oh, ow, kh, kw, vh, vw, vc], [n, co, oh, ow, kh, kw, vc, vh, vw]], ) cfg.define_reorder( "reorder_1", [n, co, oh, ow, vh, vw, vc], policy="candidate", candidate=[ [n, co, oh, ow, vh, vw, vc], [n, co, oh, ow, vc, vh, vw], [n, co, oh, ow, vh, vc, vw], ], ) cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll") cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec") # ==================================================================== VC = cfg["tile_co"].size[-1] VH = cfg["tile_oh"].size[-1] VW = cfg["tile_ow"].size[-1] kvshape = (C // VC, M, KH, KW, VC) ovshape = (N, C * M // VC, OH // VH, OW // VW, VH, VW, VC) oshape = (N, C * M, OH, OW) if dilation_h != 1 or dilation_w != 1: # undilate input data dvshape = (N, OH // VH, OW // VW, C, KH, KW, VH, VW) data_vec = te.compute( dvshape, lambda n, h, w, c, kh, kw, vh, vw: data_pad[n][c][ (h * VH + vh) * HSTR + kh * dilation_h ][(w * VW + vw) * WSTR + kw * dilation_w], name="data_vec_undilated", ) else: dvshape = (N, OH // VH, OW // VW, C, VH * HSTR + KH - 1, VW * WSTR + KW - 1) data_vec = te.compute( dvshape, lambda n, h, w, c, vh, vw: data_pad[n][c][h * VH * HSTR + vh][w * VW * WSTR + vw], name="data_vec", ) if pre_packed: kernel_vec = kernel else: kernel_vec = te.compute( kvshape, lambda co, m, kh, kw, vc: kernel[co * VC + vc][m][kh][kw], name="kernel_vec" ) kh = te.reduce_axis((0, KH), name="kh") kw = te.reduce_axis((0, KW), name="kw") idxdiv = tvm.tir.indexdiv idxmod = tvm.tir.indexmod if dilation_h != 1 or dilation_w != 1: conv = te.compute( ovshape, lambda n, co, h, w, vh, vw, vc: te.sum( data_vec[n, h, w, idxdiv(co * VC + vc, M), kh, kw, vh, vw].astype(out_dtype) * kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype), axis=[kh, kw], ), name="depthwise_conv", ) else: conv = te.compute( ovshape, lambda n, co, h, w, vh, vw, vc: te.sum( data_vec[n, h, w, idxdiv((co * VC + vc), M), vh * HSTR + kh, vw * WSTR + kw].astype( out_dtype ) * kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype), axis=[kh, kw], ), name="depthwise_conv", ) output = te.compute( oshape, lambda n, co, h, w: conv[ n, idxdiv(co, VC), idxdiv(h, VH), idxdiv(w, VW), idxmod(h, VH), idxmod(w, VW), idxmod(co, VC), ], name="output_unpack", tag="spatial_depthwise_conv2d_nchw_output", ) return output def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, last): """schedule implementation""" n, co, oh, ow, vh, vw, vc = s[conv].op.axis kh, kw = s[conv].op.reduce_axis if data_vec.op.name == "data_vec_undilated": _, dv_oh, dv_ow, dv_c, _, _, dv_vh, dv_vw = s[data_vec].op.axis else: _, dv_oh, dv_ow, dv_c, dv_vh, dv_vw = s[data_vec].op.axis data_pad = data_vec.op.input_tensors[0] if data_pad.op.name == "data_pad": assert isinstance(data_pad.op, tvm.te.ComputeOp) has_padding = True else: assert isinstance(data_pad.op, tvm.te.PlaceholderOp) has_padding = False cfg.define_knob("data_pad_inline", [0, 1, 2, 3, 4]) if cfg["data_pad_inline"].val == 1 and has_padding: s[data_pad].compute_inline() if cfg["data_pad_inline"].val == 2 and has_padding: s[data_pad].vectorize(list(s[data_pad].op.axis)[-1]) if 
cfg["data_pad_inline"].val == 3 and has_padding: s[data_pad].vectorize(list(s[data_pad].op.axis)[-1]) s[data_pad].compute_at(s[data_vec], dv_oh) if cfg["data_pad_inline"].val == 4 and has_padding: s[data_pad].vectorize(list(s[data_pad].op.axis)[-1]) s[data_pad].compute_at(s[data_vec], dv_ow) cfg.define_knob("data_vec_inline", [0, 1, 2, 3]) if cfg["data_vec_inline"].val == 1: s[data_vec].compute_at(s[conv], oh) if cfg["data_vec_inline"].val == 2: s[data_vec].compute_at(s[conv], ow) if cfg["data_vec_inline"].val == 3: s[data_vec].compute_at(s[conv], co) # schedule conv cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, kh, kw, vh, vw, vc]) cfg["ann_reduce"].apply( s, conv, [kh, kw], axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)], max_unroll=16, cfg=cfg, ) cfg["ann_spatial"].apply( s, conv, [vh, vw, vc], axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]], max_unroll=16, cfg=cfg, ) # schedule fusion n, co, h, w = s[last].op.axis co, vc = cfg["tile_co"].apply(s, last, co) oh, vh = cfg["tile_oh"].apply(s, last, h) ow, vw = cfg["tile_ow"].apply(s, last, w) cfg["reorder_1"].apply(s, last, [n, co, oh, ow, vh, vw, vc]) if last != output: s[output].compute_inline() cfg["ann_spatial"].apply( s, last, [vh, vw, vc], axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]], max_unroll=16, cfg=cfg, ) else: s[last].vectorize(vw) cfg.define_knob("conv_inline", [0, 1, 2, 3]) if cfg["conv_inline"].val == 1: s[conv].compute_at(s[last], ow) if cfg["conv_inline"].val == 2: s[conv].compute_at(s[last], oh) if cfg["conv_inline"].val == 3: s[conv].compute_at(s[last], co) # mark parallel s[last].parallel(co) if data_vec.op.name == "data_vec_undilated": _, h, _, _, _, _, _, _ = s[data_vec].op.axis else: _, h, _, _, _, _ = s[data_vec].op.axis s[data_vec].parallel(h) if kernel_vec.op.name == "kernel_vec": co, _, _, _, _ = s[kernel_vec].op.axis if autotvm.GLOBAL_SCOPE.in_tuning: # kernel packing will be pre-computed during compliation, so we skip # this part to make tuning records correct s[kernel_vec].pragma(co, "debug_skip_region") else: s[kernel_vec].parallel(co) return s
apache-2.0
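Outside of the autotvm tuning flow, the registered NCHW compute can be driven directly through TOPI; a minimal sketch with hypothetical shapes (a default schedule is used here, not the tuned template above):

import tvm
from tvm import te, topi

# NCHW input and a 3x3 depthwise kernel with channel_multiplier == 1.
data = te.placeholder((1, 32, 56, 56), name="data", dtype="float32")
kernel = te.placeholder((32, 1, 3, 3), name="kernel", dtype="float32")
out = topi.nn.depthwise_conv2d_nchw(data, kernel, (1, 1), (1, 1), (1, 1), "float32")
s = te.create_schedule(out.op)  # naive schedule, for illustration only
mod = tvm.build(s, [data, kernel, out], target="llvm")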
PaulKinlan/cli-caniuse
site/app/scripts/bower_components/jsrepl-build/extern/python/unclosured/lib/python2.7/msilib/__init__.py
43
17579
# -*- coding: iso-8859-1 -*- # Copyright (C) 2005 Martin v. Löwis # Licensed to PSF under a Contributor Agreement. from _msi import * import glob, os, string, re, sys AMD64 = "AMD64" in sys.version Itanium = "Itanium" in sys.version Win64 = AMD64 or Itanium # Partially taken from Wine datasizemask= 0x00ff type_valid= 0x0100 type_localizable= 0x0200 typemask= 0x0c00 type_long= 0x0000 type_short= 0x0400 type_string= 0x0c00 type_binary= 0x0800 type_nullable= 0x1000 type_key= 0x2000 # XXX temporary, localizable? knownbits = datasizemask | type_valid | type_localizable | \ typemask | type_nullable | type_key class Table: def __init__(self, name): self.name = name self.fields = [] def add_field(self, index, name, type): self.fields.append((index,name,type)) def sql(self): fields = [] keys = [] self.fields.sort() fields = [None]*len(self.fields) for index, name, type in self.fields: index -= 1 unk = type & ~knownbits if unk: print "%s.%s unknown bits %x" % (self.name, name, unk) size = type & datasizemask dtype = type & typemask if dtype == type_string: if size: tname="CHAR(%d)" % size else: tname="CHAR" elif dtype == type_short: assert size==2 tname = "SHORT" elif dtype == type_long: assert size==4 tname="LONG" elif dtype == type_binary: assert size==0 tname="OBJECT" else: tname="unknown" print "%s.%s unknown integer type %d" % (self.name, name, size) if type & type_nullable: flags = "" else: flags = " NOT NULL" if type & type_localizable: flags += " LOCALIZABLE" fields[index] = "`%s` %s%s" % (name, tname, flags) if type & type_key: keys.append("`%s`" % name) fields = ", ".join(fields) keys = ", ".join(keys) return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys) def create(self, db): v = db.OpenView(self.sql()) v.Execute(None) v.Close() class _Unspecified:pass def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified): "Change the sequence number of an action in a sequence list" for i in range(len(seq)): if seq[i][0] == action: if cond is _Unspecified: cond = seq[i][1] if seqno is _Unspecified: seqno = seq[i][2] seq[i] = (action, cond, seqno) return raise ValueError, "Action not found in sequence" def add_data(db, table, values): v = db.OpenView("SELECT * FROM `%s`" % table) count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount() r = CreateRecord(count) for value in values: assert len(value) == count, value for i in range(count): field = value[i] if isinstance(field, (int, long)): r.SetInteger(i+1,field) elif isinstance(field, basestring): r.SetString(i+1,field) elif field is None: pass elif isinstance(field, Binary): r.SetStream(i+1, field.name) else: raise TypeError, "Unsupported type %s" % field.__class__.__name__ try: v.Modify(MSIMODIFY_INSERT, r) except Exception, e: raise MSIError("Could not insert "+repr(values)+" into "+table) r.ClearData() v.Close() def add_stream(db, name, path): v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name) r = CreateRecord(1) r.SetStream(1, path) v.Execute(r) v.Close() def init_database(name, schema, ProductName, ProductCode, ProductVersion, Manufacturer): try: os.unlink(name) except OSError: pass ProductCode = ProductCode.upper() # Create the database db = OpenDatabase(name, MSIDBOPEN_CREATE) # Create the tables for t in schema.tables: t.create(db) # Fill the validation table add_data(db, "_Validation", schema._Validation_records) # Initialize the summary information, allowing at most 20 properties si = db.GetSummaryInformation(20) si.SetProperty(PID_TITLE, "Installation Database") si.SetProperty(PID_SUBJECT, 
ProductName) si.SetProperty(PID_AUTHOR, Manufacturer) if Itanium: si.SetProperty(PID_TEMPLATE, "Intel64;1033") elif AMD64: si.SetProperty(PID_TEMPLATE, "x64;1033") else: si.SetProperty(PID_TEMPLATE, "Intel;1033") si.SetProperty(PID_REVNUMBER, gen_uuid()) si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media si.SetProperty(PID_PAGECOUNT, 200) si.SetProperty(PID_APPNAME, "Python MSI Library") # XXX more properties si.Persist() add_data(db, "Property", [ ("ProductName", ProductName), ("ProductCode", ProductCode), ("ProductVersion", ProductVersion), ("Manufacturer", Manufacturer), ("ProductLanguage", "1033")]) db.Commit() return db def add_tables(db, module): for table in module.tables: add_data(db, table, getattr(module, table)) def make_id(str): identifier_chars = string.ascii_letters + string.digits + "._" str = "".join([c if c in identifier_chars else "_" for c in str]) if str[0] in (string.digits + "."): str = "_" + str assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str return str def gen_uuid(): return "{"+UuidCreate().upper()+"}" class CAB: def __init__(self, name): self.name = name self.files = [] self.filenames = set() self.index = 0 def gen_id(self, file): logical = _logical = make_id(file) pos = 1 while logical in self.filenames: logical = "%s.%d" % (_logical, pos) pos += 1 self.filenames.add(logical) return logical def append(self, full, file, logical): if os.path.isdir(full): return if not logical: logical = self.gen_id(file) self.index += 1 self.files.append((full, logical)) return self.index, logical def commit(self, db): from tempfile import mktemp filename = mktemp() FCICreate(filename, self.files) add_data(db, "Media", [(1, self.index, None, "#"+self.name, None, None)]) add_stream(db, self.name, filename) os.unlink(filename) db.Commit() _directories = set() class Directory: def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None): """Create a new directory in the Directory table. There is a current component at each point in time for the directory, which is either explicitly created through start_component, or implicitly when files are added for the first time. Files are added into the current component, and into the cab file. To create a directory, a base directory object needs to be specified (can be None), the path to the physical directory, and a logical directory name. Default specifies the DefaultDir slot in the directory table. componentflags specifies the default flags that new components get.""" index = 1 _logical = make_id(_logical) logical = _logical while logical in _directories: logical = "%s%d" % (_logical, index) index += 1 _directories.add(logical) self.db = db self.cab = cab self.basedir = basedir self.physical = physical self.logical = logical self.component = None self.short_names = set() self.ids = set() self.keyfiles = {} self.componentflags = componentflags if basedir: self.absolute = os.path.join(basedir.absolute, physical) blogical = basedir.logical else: self.absolute = physical blogical = None add_data(db, "Directory", [(logical, blogical, default)]) def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None): """Add an entry to the Component table, and make this component the current for this directory. If no component name is given, the directory name is used. If no feature is given, the current feature is used. If no flags are given, the directory's default flags are used. 
If no keyfile is given, the KeyPath is left null in the Component table.""" if flags is None: flags = self.componentflags if uuid is None: uuid = gen_uuid() else: uuid = uuid.upper() if component is None: component = self.logical self.component = component if Win64: flags |= 256 if keyfile: keyid = self.cab.gen_id(keyfile) self.keyfiles[keyfile] = keyid else: keyid = None add_data(self.db, "Component", [(component, uuid, self.logical, flags, None, keyid)]) if feature is None: feature = current_feature add_data(self.db, "FeatureComponents", [(feature.id, component)]) def make_short(self, file): oldfile = file file = file.replace('+', '_') file = ''.join(c for c in file if not c in ' "/\[]:;=,') parts = file.split(".") if len(parts) > 1: prefix = "".join(parts[:-1]).upper() suffix = parts[-1].upper() if not prefix: prefix = suffix suffix = None else: prefix = file.upper() suffix = None if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and ( not suffix or len(suffix) <= 3): if suffix: file = prefix+"."+suffix else: file = prefix else: file = None if file is None or file in self.short_names: prefix = prefix[:6] if suffix: suffix = suffix[:3] pos = 1 while 1: if suffix: file = "%s~%d.%s" % (prefix, pos, suffix) else: file = "%s~%d" % (prefix, pos) if file not in self.short_names: break pos += 1 assert pos < 10000 if pos in (10, 100, 1000): prefix = prefix[:-1] self.short_names.add(file) assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names return file def add_file(self, file, src=None, version=None, language=None): """Add a file to the current component of the directory, starting a new one if there is no current component. By default, the file name in the source and the file table will be identical. If the src file is specified, it is interpreted relative to the current directory. Optionally, a version and a language can be specified for the entry in the File table.""" if not self.component: self.start_component(self.logical, current_feature, 0) if not src: # Allow relative paths for file if src is not specified src = file file = os.path.basename(file) absolute = os.path.join(self.absolute, src) assert not re.search(r'[\?|><:/*"]', file) # restrictions on long names if file in self.keyfiles: logical = self.keyfiles[file] else: logical = None sequence, logical = self.cab.append(absolute, file, logical) assert logical not in self.ids self.ids.add(logical) short = self.make_short(file) full = "%s|%s" % (short, file) filesize = os.stat(absolute).st_size # constants.msidbFileAttributesVital # Compressed omitted, since it is the database default # could add r/o, system, hidden attributes = 512 add_data(self.db, "File", [(logical, self.component, full, filesize, version, language, attributes, sequence)]) #if not version: # # Add hash if the file is not versioned # filehash = FileHash(absolute, 0) # add_data(self.db, "MsiFileHash", # [(logical, 0, filehash.IntegerData(1), # filehash.IntegerData(2), filehash.IntegerData(3), # filehash.IntegerData(4))]) # Automatically remove .pyc/.pyo files on uninstall (2) # XXX: adding so many RemoveFile entries makes installer unbelievably # slow.
So instead, we have to use wildcard remove entries if file.endswith(".py"): add_data(self.db, "RemoveFile", [(logical+"c", self.component, "%sC|%sc" % (short, file), self.logical, 2), (logical+"o", self.component, "%sO|%so" % (short, file), self.logical, 2)]) return logical def glob(self, pattern, exclude = None): """Add a list of files to the current component as specified in the glob pattern. Individual files can be excluded in the exclude list.""" files = glob.glob1(self.absolute, pattern) for f in files: if exclude and f in exclude: continue self.add_file(f) return files def remove_pyc(self): "Remove .pyc/.pyo files on uninstall" add_data(self.db, "RemoveFile", [(self.component+"c", self.component, "*.pyc", self.logical, 2), (self.component+"o", self.component, "*.pyo", self.logical, 2)]) class Binary: def __init__(self, fname): self.name = fname def __repr__(self): return 'msilib.Binary(os.path.join(dirname,"%s"))' % self.name class Feature: def __init__(self, db, id, title, desc, display, level = 1, parent=None, directory = None, attributes=0): self.id = id if parent: parent = parent.id add_data(db, "Feature", [(id, parent, title, desc, display, level, directory, attributes)]) def set_current(self): global current_feature current_feature = self class Control: def __init__(self, dlg, name): self.dlg = dlg self.name = name def event(self, event, argument, condition = "1", ordering = None): add_data(self.dlg.db, "ControlEvent", [(self.dlg.name, self.name, event, argument, condition, ordering)]) def mapping(self, event, attribute): add_data(self.dlg.db, "EventMapping", [(self.dlg.name, self.name, event, attribute)]) def condition(self, action, condition): add_data(self.dlg.db, "ControlCondition", [(self.dlg.name, self.name, action, condition)]) class RadioButtonGroup(Control): def __init__(self, dlg, name, property): self.dlg = dlg self.name = name self.property = property self.index = 1 def add(self, name, x, y, w, h, text, value = None): if value is None: value = name add_data(self.dlg.db, "RadioButton", [(self.property, self.index, value, x, y, w, h, text, None)]) self.index += 1 class Dialog: def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel): self.db = db self.name = name self.x, self.y, self.w, self.h = x,y,w,h add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)]) def control(self, name, type, x, y, w, h, attr, prop, text, next, help): add_data(self.db, "Control", [(self.name, name, type, x, y, w, h, attr, prop, text, next, help)]) return Control(self, name) def text(self, name, x, y, w, h, attr, text): return self.control(name, "Text", x, y, w, h, attr, None, text, None, None) def bitmap(self, name, x, y, w, h, text): return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None) def line(self, name, x, y, w, h): return self.control(name, "Line", x, y, w, h, 1, None, None, None, None) def pushbutton(self, name, x, y, w, h, attr, text, next): return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None) def radiogroup(self, name, x, y, w, h, attr, prop, text, next): add_data(self.db, "Control", [(self.name, name, "RadioButtonGroup", x, y, w, h, attr, prop, text, next, None)]) return RadioButtonGroup(self, name, prop) def checkbox(self, name, x, y, w, h, attr, prop, text, next): return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
apache-2.0
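The identifier rules enforced by make_id above are easiest to see by example (behavior read straight from the code: characters outside letters, digits, '.' and '_' map to '_', and a leading digit or dot gains a '_' prefix):

>>> make_id("foo bar-baz.py")
'foo_bar_baz.py'
>>> make_id("2to3.py")
'_2to3.py'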
wuga214/Django-Wuga
env/lib/python2.7/site-packages/django/template/__init__.py
165
1889
""" Django's support for templates. The django.template namespace contains two independent subsystems: 1. Multiple Template Engines: support for pluggable template backends, built-in backends and backend-independent APIs 2. Django Template Language: Django's own template engine, including its built-in loaders, context processors, tags and filters. Ideally these subsystems would be implemented in distinct packages. However keeping them together made the implementation of Multiple Template Engines less disruptive . Here's a breakdown of which modules belong to which subsystem. Multiple Template Engines: - django.template.backends.* - django.template.loader - django.template.response Django Template Language: - django.template.base - django.template.context - django.template.context_processors - django.template.loaders.* - django.template.debug - django.template.defaultfilters - django.template.defaulttags - django.template.engine - django.template.loader_tags - django.template.smartif Shared: - django.template.utils """ # Multiple Template Engines from .engine import Engine from .utils import EngineHandler engines = EngineHandler() __all__ = ('Engine', 'engines') # Django Template Language # Public exceptions from .base import VariableDoesNotExist # NOQA isort:skip from .context import ContextPopException # NOQA isort:skip from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip # Template parts from .base import ( # NOQA isort:skip Context, Node, NodeList, Origin, RequestContext, StringOrigin, Template, Variable, ) # Library management from .library import Library # NOQA isort:skip __all__ += ('Template', 'Context', 'RequestContext')
apache-2.0
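A short usage sketch of the backend-independent API exported here (assumes a configured Django settings module with the default django engine installed):

from django.template import engines

django_engine = engines["django"]           # EngineHandler lookup by alias
template = django_engine.from_string("Hello {{ name }}!")
print(template.render({"name": "world"}))   # -> Hello world!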
binhqnguyen/lena
src/lte/test/examples-to-run.py
151
3664
#! /usr/bin/env python # -*- coding: utf-8 -*- ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*- # A list of C++ examples to run in order to ensure that they remain # buildable and runnable over time. Each tuple in the list contains # # (example_name, do_run, do_valgrind_run). # # See test.py for more information. cpp_examples = [ ("lena-cqi-threshold", "True", "True"), ("lena-dual-stripe", "True", "True"), ("lena-dual-stripe --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"), ("lena-dual-stripe --epc=1 --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"), ("lena-dual-stripe --simTime=0.01", "True", "True"), ("lena-dual-stripe --epc=1 --simTime=0.01", "True", "True"), ("lena-dual-stripe --epc=1 --useUdp=0 --simTime=0.01", "True", "True"), ("lena-dual-stripe --epc=1 --fadingTrace=../../src/lte/model/fading-traces/fading_trace_EPA_3kmph.fad --simTime=0.01", "True", "True"), ("lena-dual-stripe --nBlocks=1 --nMacroEnbSites=0 --macroUeDensity=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --homeUesHomeEnbRatio=2 --macroEnbTxPowerDbm=0 --simTime=0.01", "True", "True"), ("lena-dual-stripe --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1 --nApartmentsX=4 --nMacroEnbSitesX=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --macroEnbTxPowerDbm=0 --epcDl=1 --epcUl=0 --epc=1 --numBearersPerUe=4 --homeUesHomeEnbRatio=15 --simTime=0.01", "True", "True"), ("lena-fading", "True", "True"), ("lena-gtpu-tunnel", "True", "True"), ("lena-intercell-interference --simTime=0.1", "True", "True"), ("lena-pathloss-traces", "True", "True"), ("lena-profiling", "True", "True"), ("lena-profiling --simTime=0.1 --nUe=2 --nEnb=5 --nFloors=0", "True", "True"), ("lena-profiling --simTime=0.1 --nUe=3 --nEnb=6 --nFloors=1", "True", "True"), ("lena-rlc-traces", "True", "True"), ("lena-rem", "True", "True"), ("lena-rem-sector-antenna", "True", "True"), ("lena-simple", "True", "True"), ("lena-simple-epc", "True", "True"), ("lena-x2-handover", "True", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TtaFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdTbfqFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdMtFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdBetFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::RrFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PssFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PfFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdTbfqFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdMtFfMacScheduler", "options.valgrind", "True"), ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdBetFfMacScheduler", "options.valgrind", "True"), ] # A list of Python examples to run in order to ensure that they remain # runnable over time. Each tuple in the list contains # # (example_name, do_run). # # See test.py for more information. python_examples = []
gpl-2.0
andela-earinde/bellatrix-py
app/js/lib/lib/modules/pyrepl/pygame_console.py
13
11980
# Copyright 2000-2004 Michael Hudson-Doyle <[email protected]> # # All Rights Reserved # # # Permission to use, copy, modify, and distribute this software and # its documentation for any purpose is hereby granted without fee, # provided that the above copyright notice appear in all copies and # that both that copyright notice and this permission notice appear in # supporting documentation. # # THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO # THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, # INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER # RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # the pygame console is currently thoroughly broken. # there's a fundamental difference from the UnixConsole: here we're # the terminal emulator too, in effect. This means, e.g., for pythoni # we really need a separate process (or thread) to monitor for ^C # during command execution and zap the executor process. Making this # work on non-Unix is expected to be even more entertaining. from pygame.locals import * from pyrepl.console import Console, Event from pyrepl import pygame_keymap import pygame import types lmargin = 5 rmargin = 5 tmargin = 5 bmargin = 5 try: bool except NameError: def bool(x): return not not x modcolors = {K_LCTRL:1, K_RCTRL:1, K_LMETA:1, K_RMETA:1, K_LALT:1, K_RALT:1, K_LSHIFT:1, K_RSHIFT:1} class colors: fg = 250,240,230 bg = 5, 5, 5 cursor = 230, 0, 230 margin = 5, 5, 15 class FakeStdout: def __init__(self, con): self.con = con def write(self, text): self.con.write(text) def flush(self): pass class FakeStdin: def __init__(self, con): self.con = con def read(self, n=None): # argh! raise NotImplementedError def readline(self, n=None): from reader import Reader try: # this isn't quite right: it will clobber any prompt that's # been printed. Not sure how to get around this... return Reader(self.con).readline() except EOFError: return '' class PyGameConsole(Console): """Attributes: (keymap), (fd), screen, height, width, """ def __init__(self): self.pygame_screen = pygame.display.set_mode((800, 600)) pygame.font.init() pygame.key.set_repeat(500, 30) self.font = pygame.font.Font( "/usr/X11R6/lib/X11/fonts/TTF/luximr.ttf", 15) self.fw, self.fh = self.fontsize = self.font.size("X") self.cursor = pygame.Surface(self.fontsize) self.cursor.fill(colors.cursor) self.clear() self.curs_vis = 1 self.height, self.width = self.getheightwidth() pygame.display.update() pygame.event.set_allowed(None) pygame.event.set_allowed(KEYDOWN) def install_keymap(self, keymap): """Install a given keymap. keymap is a tuple of 2-element tuples; each small tuple is a pair (keyspec, event-name). 
The format for keyspec is modelled on that used by readline (so read that manual for now!).""" self.k = self.keymap = pygame_keymap.compile_keymap(keymap) def char_rect(self, x, y): return self.char_pos(x, y), self.fontsize def char_pos(self, x, y): return (lmargin + x*self.fw, tmargin + y*self.fh + self.cur_top + self.scroll) def paint_margin(self): s = self.pygame_screen c = colors.margin s.fill(c, [0, 0, 800, tmargin]) s.fill(c, [0, 0, lmargin, 600]) s.fill(c, [0, 600 - bmargin, 800, bmargin]) s.fill(c, [800 - rmargin, 0, lmargin, 600]) def refresh(self, screen, cxy): self.screen = screen self.pygame_screen.fill(colors.bg, [0, tmargin + self.cur_top + self.scroll, 800, 600]) self.paint_margin() line_top = self.cur_top width, height = self.fontsize self.cxy = cxy cp = self.char_pos(*cxy) if cp[1] < tmargin: self.scroll = - (cxy[1]*self.fh + self.cur_top) self.repaint() elif cp[1] + self.fh > 600 - bmargin: self.scroll += (600 - bmargin) - (cp[1] + self.fh) self.repaint() if self.curs_vis: self.pygame_screen.blit(self.cursor, self.char_pos(*cxy)) for line in screen: if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): if line: ren = self.font.render(line, 1, colors.fg) self.pygame_screen.blit(ren, (lmargin, tmargin + line_top + self.scroll)) line_top += self.fh pygame.display.update() def prepare(self): self.cmd_buf = '' self.k = self.keymap self.height, self.width = self.getheightwidth() self.curs_vis = 1 self.cur_top = self.pos[0] self.event_queue = [] def restore(self): pass def blit_a_char(self, linen, charn): line = self.screen[linen] if charn < len(line): text = self.font.render(line[charn], 1, colors.fg) self.pygame_screen.blit(text, self.char_pos(charn, linen)) def move_cursor(self, x, y): cp = self.char_pos(x, y) if cp[1] < tmargin or cp[1] + self.fh > 600 - bmargin: self.event_queue.append(Event('refresh', '', '')) else: if self.curs_vis: cx, cy = self.cxy self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) self.blit_a_char(cy, cx) self.pygame_screen.blit(self.cursor, cp) self.blit_a_char(y, x) pygame.display.update() self.cxy = (x, y) def set_cursor_vis(self, vis): self.curs_vis = vis if vis: self.move_cursor(*self.cxy) else: cx, cy = self.cxy self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) self.blit_a_char(cy, cx) pygame.display.update() def getheightwidth(self): """Return (height, width) where height and width are the height and width of the terminal window in characters.""" return ((600 - tmargin - bmargin)/self.fh, (800 - lmargin - rmargin)/self.fw) def tr_event(self, pyg_event): shift = bool(pyg_event.mod & KMOD_SHIFT) ctrl = bool(pyg_event.mod & KMOD_CTRL) meta = bool(pyg_event.mod & (KMOD_ALT|KMOD_META)) try: return self.k[(pyg_event.unicode, meta, ctrl)], pyg_event.unicode except KeyError: try: return self.k[(pyg_event.key, meta, ctrl)], pyg_event.unicode except KeyError: return "invalid-key", pyg_event.unicode def get_event(self, block=1): """Return an Event instance.
Returns None if |block| is false and there is no event pending, otherwise waits for the completion of an event.""" while 1: if self.event_queue: return self.event_queue.pop(0) elif block: pyg_event = pygame.event.wait() else: pyg_event = pygame.event.poll() if pyg_event.type == NOEVENT: return if pyg_event.key in modcolors: continue k, c = self.tr_event(pyg_event) self.cmd_buf += c.encode('ascii', 'replace') self.k = k if not isinstance(k, types.DictType): e = Event(k, self.cmd_buf, []) self.k = self.keymap self.cmd_buf = '' return e def beep(self): # uhh, can't be bothered now. # pygame.sound.something, I guess. pass def clear(self): """Wipe the screen""" self.pygame_screen.fill(colors.bg) #self.screen = [] self.pos = [0, 0] self.grobs = [] self.cur_top = 0 self.scroll = 0 def finish(self): """Move the cursor to the end of the display and otherwise get ready for end. XXX could be merged with restore? Hmm.""" if self.curs_vis: cx, cy = self.cxy self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) self.blit_a_char(cy, cx) for line in self.screen: self.write_line(line, 1) if self.curs_vis: self.pygame_screen.blit(self.cursor, (lmargin + self.pos[1], tmargin + self.pos[0] + self.scroll)) pygame.display.update() def flushoutput(self): """Flush all output to the screen (assuming there's some buffering going on somewhere)""" # no buffering here, ma'am (though perhaps there should be!) pass def forgetinput(self): """Forget all pending, but not yet processed input.""" while pygame.event.poll().type <> NOEVENT: pass def getpending(self): """Return the characters that have been typed but not yet processed.""" events = [] while 1: event = pygame.event.poll() if event.type == NOEVENT: break events.append(event) return events def wait(self): """Wait for an event.""" raise Exception, "erp!" def repaint(self): # perhaps we should consolidate grobs? self.pygame_screen.fill(colors.bg) self.paint_margin() for (y, x), surf, text in self.grobs: if surf and 0 < y + self.scroll: self.pygame_screen.blit(surf, (lmargin + x, tmargin + y + self.scroll)) pygame.display.update() def write_line(self, line, ret): charsleft = (self.width*self.fw - self.pos[1])/self.fw while len(line) > charsleft: self.write_line(line[:charsleft], 1) line = line[charsleft:] if line: ren = self.font.render(line, 1, colors.fg, colors.bg) self.grobs.append((self.pos[:], ren, line)) self.pygame_screen.blit(ren, (lmargin + self.pos[1], tmargin + self.pos[0] + self.scroll)) else: self.grobs.append((self.pos[:], None, line)) if ret: self.pos[0] += self.fh if tmargin + self.pos[0] + self.scroll + self.fh > 600 - bmargin: self.scroll = 600 - bmargin - self.pos[0] - self.fh - tmargin self.repaint() self.pos[1] = 0 else: self.pos[1] += self.fw*len(line) def write(self, text): lines = text.split("\n") if self.curs_vis: self.pygame_screen.fill(colors.bg, (lmargin + self.pos[1], tmargin + self.pos[0] + self.scroll, self.fw, self.fh)) for line in lines[:-1]: self.write_line(line, 1) self.write_line(lines[-1], 0) if self.curs_vis: self.pygame_screen.blit(self.cursor, (lmargin + self.pos[1], tmargin + self.pos[0] + self.scroll)) pygame.display.update() def flush(self): pass
mit
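A note on the keymap machinery in the pyrepl console above: install_keymap stores the compiled keymap as a nested dict, and get_event walks it one keystroke at a time, descending into sub-dicts for multi-key prefixes until it reaches a leaf event name. Below is a minimal, self-contained sketch of that dispatch shape; it is illustrative only, since the real pygame_keymap.compile_keymap also handles readline-style keyspecs and the meta/ctrl modifier tuples built by tr_event.

# Sketch of the nested-dict dispatch that PyGameConsole.k implements.
keymap = {
    '\x1b': {                      # ESC introduces a two-key sequence
        'f': 'forward-word',
        'b': 'backward-word',
    },
    '\x01': 'beginning-of-line',   # C-a resolves immediately
}

def dispatch(keymap, keys):
    node = keymap
    for ch in keys:
        node = node.get(ch, 'invalid-key')
        if not isinstance(node, dict):  # leaf reached: an event name
            return node
    return None                         # still inside a multi-key prefix

assert dispatch(keymap, '\x1bf') == 'forward-word'
assert dispatch(keymap, '\x01') == 'beginning-of-line'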
ROMFactory/android_external_chromium_org
build/android/symbolize.py
97
2639
#!/usr/bin/env python # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Symbolizes stack traces generated by Chromium for Android. Sample usage: adb logcat chromium:V | symbolize.py """ import os import re import sys from pylib import constants # Uses symbol.py from third_party/android_platform, not python's. sys.path.insert(0, os.path.join(constants.DIR_SOURCE_ROOT, 'third_party/android_platform/development/scripts')) import symbol # Sample output from base/debug/stack_trace_android.cc #00 0x693cd34f /path/to/some/libfoo.so+0x0007434f TRACE_LINE = re.compile('(?P<frame>\#[0-9]+ 0x[0-9a-f]{8,8}) ' '(?P<lib>[^+]+)\+0x(?P<addr>[0-9a-f]{8,8})') class Symbolizer(object): def __init__(self, output): self._output = output def write(self, data): while True: match = re.search(TRACE_LINE, data) if not match: self._output.write(data) break frame = match.group('frame') lib = match.group('lib') addr = match.group('addr') # TODO(scherkus): Doing a single lookup per line is pretty slow, # especially with larger libraries. Consider caching strategies such as: # 1) Have Python load the libraries and do symbol lookups instead of # calling out to addr2line each time. # 2) Have Python keep multiple addr2line instances open as subprocesses, # piping addresses and reading back symbols as we find them # 3) Read ahead the entire stack trace until we find no more, then batch # the symbol lookups. # # TODO(scherkus): These results are memoized, which could result in # incorrect lookups when running this script on long-lived instances # (e.g., adb logcat) when doing incremental development. Consider clearing # the cache when modification timestamp of libraries change. sym = symbol.SymbolInformation(lib, addr, False)[0][0] if not sym: post = match.end('addr') self._output.write(data[:post]) data = data[post:] continue pre = match.start('frame') post = match.end('addr') self._output.write(data[:pre]) self._output.write(frame) self._output.write(' ') self._output.write(sym) data = data[post:] def flush(self): self._output.flush() def main(): symbolizer = Symbolizer(sys.stdout) for line in sys.stdin: symbolizer.write(line) symbolizer.flush() if __name__ == '__main__': main()
bsd-3-clause
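The symbolizer above pivots on the TRACE_LINE pattern, which extracts the frame marker, library path, and hex offset from lines shaped like the stack_trace_android.cc sample in its comment. A standalone check of that pattern (raw-string form of the same regex; the input is the sample line from the script):

import re

TRACE_LINE = re.compile(r'(?P<frame>\#[0-9]+ 0x[0-9a-f]{8,8}) '
                        r'(?P<lib>[^+]+)\+0x(?P<addr>[0-9a-f]{8,8})')

match = TRACE_LINE.search('#00 0x693cd34f /path/to/some/libfoo.so+0x0007434f')
assert match.group('frame') == '#00 0x693cd34f'
assert match.group('lib') == '/path/to/some/libfoo.so'
assert match.group('addr') == '0007434f'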
dietrichc/streamline-ppc-reports
examples/dfp/v201411/custom_targeting_service/update_custom_targeting_keys.py
4
2407
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example updates the display name of a single custom targeting key. To determine which custom targeting keys exist, run get_all_custom_targeting_keys_and_values.py.""" __author__ = ('Nicholas Chen', 'Joseph DiLallo') # Import appropriate modules from the client library. from googleads import dfp CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE' def main(client, key_id): # Initialize appropriate service. custom_targeting_service = client.GetService( 'CustomTargetingService', version='v201411') values = [{ 'key': 'keyId', 'value': { 'xsi_type': 'NumberValue', 'value': key_id } }] query = 'WHERE id = :keyId' statement = dfp.FilterStatement(query, values, 1) # Get custom targeting keys by statement. response = custom_targeting_service.getCustomTargetingKeysByStatement( statement.ToStatement()) # Update each local custom targeting key object by changing its display name. if 'results' in response: updated_keys = [] for key in response['results']: if not key['displayName']: key['displayName'] = key['name'] key['displayName'] += ' (Deprecated)' updated_keys.append(key) keys = custom_targeting_service.updateCustomTargetingKeys(updated_keys) # Display results. if keys: for key in keys: print ('Custom targeting key with id \'%s\', name \'%s\', display name ' '\'%s\', and type \'%s\' was updated.' % (key['id'], key['name'], key['displayName'], key['type'])) else: print 'No custom targeting keys were found to update.' if __name__ == '__main__': # Initialize client object. dfp_client = dfp.DfpClient.LoadFromStorage() main(dfp_client, CUSTOM_TARGETING_KEY_ID)
apache-2.0
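Context for the values/query pair in the DFP example above: statements use PQL bind variables, so :keyId in the WHERE clause is filled from the typed entry in values. A hedged sketch of binding a string instead of a number, following the same dfp.FilterStatement call shape used in main(); the variable name and value here are invented:

from googleads import dfp

values = [{
    'key': 'name',
    'value': {
        'xsi_type': 'TextValue',   # string-typed PQL value
        'value': 'genre',
    }
}]
statement = dfp.FilterStatement('WHERE name = :name', values, 1)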
ioam/holoviews
holoviews/tests/core/testcollation.py
2
3718
""" Test cases for Collator """ import itertools import numpy as np from holoviews.core import Collator, HoloMap, NdOverlay, Overlay, GridSpace from holoviews.element import Curve from holoviews.element.comparison import ComparisonTestCase class TestCollation(ComparisonTestCase): def setUp(self): alphas, betas, deltas = 2, 2, 2 Bs = list(range(100)) coords = itertools.product(*(range(n) for n in [alphas, betas, deltas])) mus=np.random.rand(alphas, betas, 100, 10) self.phase_boundaries = {(a, b, d): Curve(zip(Bs, mus[a, b, :, i]*a+b)) for i in range(10) for a, b, d in coords} self.dimensions = ['alpha', 'beta', 'delta'] self.nesting_hmap = HoloMap(self.phase_boundaries, kdims=self.dimensions) self.nested_hmap = self.nesting_hmap.groupby(['alpha']) self.nested_overlay = self.nesting_hmap.overlay(['delta']) self.nested_grid = self.nested_overlay.grid(['alpha', 'beta']) self.nested_layout = self.nested_overlay.layout(['alpha', 'beta']) def test_collate_hmap(self): collated = self.nested_hmap.collate() self.assertEqual(collated.kdims, self.nesting_hmap.kdims) self.assertEqual(collated.keys(), self.nesting_hmap.keys()) self.assertEqual(collated.type, self.nesting_hmap.type) self.assertEqual(repr(collated), repr(self.nesting_hmap)) def test_collate_ndoverlay(self): collated = self.nested_overlay.collate(NdOverlay) ndoverlay = NdOverlay(self.phase_boundaries, kdims=self.dimensions) self.assertEqual(collated.kdims, ndoverlay.kdims) self.assertEqual(collated.keys(), ndoverlay.keys()) self.assertEqual(repr(collated), repr(ndoverlay)) def test_collate_gridspace_ndoverlay(self): grid = self.nesting_hmap.groupby(['delta']).collate(NdOverlay).grid(['alpha', 'beta']) self.assertEqual(grid.dimensions(), self.nested_grid.dimensions()) self.assertEqual(grid.keys(), self.nested_grid.keys()) self.assertEqual(repr(grid), repr(self.nested_grid)) def test_collate_ndlayout_ndoverlay(self): layout = self.nesting_hmap.groupby(['delta']).collate(NdOverlay).layout(['alpha', 'beta']) self.assertEqual(layout.dimensions(), self.nested_layout.dimensions()) self.assertEqual(layout.keys(), self.nested_layout.keys()) self.assertEqual(repr(layout), repr(self.nested_layout)) def test_collate_layout_overlay(self): layout = self.nested_overlay + self.nested_overlay collated = Collator(kdims=['alpha', 'beta']) for k, v in self.nested_overlay.items(): collated[k] = v + v collated = collated() self.assertEqual(collated.dimensions(), layout.dimensions()) def test_collate_layout_hmap(self): layout = self.nested_overlay + self.nested_overlay collated = Collator(kdims=['delta'], merge_type=NdOverlay) for k, v in self.nesting_hmap.groupby(['delta']).items(): collated[k] = v + v collated = collated() self.assertEqual(repr(collated), repr(layout)) self.assertEqual(collated.dimensions(), layout.dimensions()) def test_overlay_hmap_collate(self): hmap = HoloMap({i: Curve(np.arange(10)*i) for i in range(3)}) overlaid = Overlay([hmap, hmap, hmap]).collate() self.assertEqual(overlaid, hmap*hmap*hmap) def test_overlay_gridspace_collate(self): grid = GridSpace({(i,j): Curve(np.arange(10)*i) for i in range(3) for j in range(3)}) overlaid = Overlay([grid, grid, grid]).collate() self.assertEqual(overlaid, grid*grid*grid)
bsd-3-clause
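The round-trip these holoviews tests assert, groupby to nest a HoloMap and collate to flatten it back, is easier to see on a small example. A sketch using the same API calls as the tests; the data is invented:

import numpy as np
from holoviews.core import HoloMap
from holoviews.element import Curve

hmap = HoloMap({(a, b): Curve(np.arange(10) * (a + b))
                for a in range(2) for b in range(2)},
               kdims=['alpha', 'beta'])

nested = hmap.groupby(['alpha'])   # HoloMap of HoloMaps, keyed by alpha
collated = nested.collate()        # flattened back to (alpha, beta) keys
assert collated.keys() == hmap.keys()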
abreen/socrates.py
blessed/tests/test_core.py
1
14455
# -*- coding: utf-8 -*- "Core blessed Terminal() tests." # std try: from StringIO import StringIO except ImportError: from io import StringIO import collections import warnings import platform import locale import sys import imp import os # local from .accessories import ( as_subprocess, TestTerminal, unicode_cap, all_terms ) # 3rd party import mock import pytest def test_export_only_Terminal(): "Ensure only Terminal instance is exported for import * statements." import blessed assert blessed.__all__ == ('Terminal',) def test_null_location(all_terms): "Make sure ``location()`` with no args just does position restoration." @as_subprocess def child(kind): t = TestTerminal(stream=StringIO(), force_styling=True) with t.location(): pass expected_output = u''.join( (unicode_cap('sc'), unicode_cap('rc'))) assert (t.stream.getvalue() == expected_output) child(all_terms) def test_flipped_location_move(all_terms): "``location()`` and ``move()`` receive counter-example arguments." @as_subprocess def child(kind): buf = StringIO() t = TestTerminal(stream=buf, force_styling=True) y, x = 10, 20 with t.location(y, x): xy_val = t.move(x, y) yx_val = buf.getvalue()[len(t.sc):] assert xy_val == yx_val child(all_terms) def test_yield_keypad(): "Ensure ``keypad()`` writes keyboard_xmit and keyboard_local." @as_subprocess def child(kind): # given, t = TestTerminal(stream=StringIO(), force_styling=True) expected_output = u''.join((t.smkx, t.rmkx)) # exercise, with t.keypad(): pass # verify. assert (t.stream.getvalue() == expected_output) child(kind='xterm') def test_null_fileno(): "Make sure ``Terminal`` works when ``fileno`` is ``None``." @as_subprocess def child(): # This simulates piping output to another program. out = StringIO() out.fileno = None t = TestTerminal(stream=out) assert (t.save == u'') child() def test_number_of_colors_without_tty(): "``number_of_colors`` should return 0 when there's no tty." @as_subprocess def child_256_nostyle(): t = TestTerminal(stream=StringIO()) assert (t.number_of_colors == 0) @as_subprocess def child_256_forcestyle(): t = TestTerminal(stream=StringIO(), force_styling=True) assert (t.number_of_colors == 256) @as_subprocess def child_8_forcestyle(): kind = 'ansi' if platform.system().lower() == 'freebsd': # 'ansi' on freebsd returns 0 colors, we use the 'cons25' driver, # compatible with its kernel tty.c kind = 'cons25' t = TestTerminal(kind=kind, stream=StringIO(), force_styling=True) assert (t.number_of_colors == 8) @as_subprocess def child_0_forcestyle(): t = TestTerminal(kind='vt220', stream=StringIO(), force_styling=True) assert (t.number_of_colors == 0) child_0_forcestyle() child_8_forcestyle() child_256_forcestyle() child_256_nostyle() def test_number_of_colors_with_tty(): "test ``number_of_colors`` 0, 8, and 256." @as_subprocess def child_256(): t = TestTerminal() assert (t.number_of_colors == 256) @as_subprocess def child_8(): kind = 'ansi' if platform.system().lower() == 'freebsd': # 'ansi' on freebsd returns 0 colors, we use the 'cons25' driver, # compatible with its kernel tty.c kind = 'cons25' t = TestTerminal(kind=kind) assert (t.number_of_colors == 8) @as_subprocess def child_0(): t = TestTerminal(kind='vt220') assert (t.number_of_colors == 0) child_0() child_8() child_256() def test_init_descriptor_always_initted(all_terms): "Test height and width with non-tty Terminals." 
@as_subprocess def child(kind): t = TestTerminal(kind=kind, stream=StringIO()) assert t._init_descriptor == sys.__stdout__.fileno() assert (isinstance(t.height, int)) assert (isinstance(t.width, int)) assert t.height == t._height_and_width()[0] assert t.width == t._height_and_width()[1] child(all_terms) def test_force_styling_none(all_terms): "If ``force_styling=None`` is used, don't ever do styling." @as_subprocess def child(kind): t = TestTerminal(kind=kind, force_styling=None) assert (t.save == '') assert (t.color(9) == '') assert (t.bold('oi') == 'oi') child(all_terms) def test_setupterm_singleton_issue33(): "A warning is emitted if a new terminal ``kind`` is used per process." @as_subprocess def child(): warnings.filterwarnings("error", category=UserWarning) # instantiate first terminal, of type xterm-256color term = TestTerminal(force_styling=True) try: # a second instantiation raises UserWarning term = TestTerminal(kind="vt220", force_styling=True) except UserWarning: err = sys.exc_info()[1] assert (err.args[0].startswith( 'A terminal of kind "vt220" has been requested') ), err.args[0] assert ('a terminal of kind "xterm-256color" will ' 'continue to be returned' in err.args[0]), err.args[0] else: # unless term is not a tty and setupterm() is not called assert not term.is_a_tty or False, 'Should have thrown exception' warnings.resetwarnings() child() def test_setupterm_invalid_issue39(): "A warning is emitted if TERM is invalid." # https://bugzilla.mozilla.org/show_bug.cgi?id=878089 # # if TERM is unset, defaults to 'unknown', which should # fail to lookup and emit a warning on *some* systems. # freebsd actually has a termcap entry for 'unknown' @as_subprocess def child(): warnings.filterwarnings("error", category=UserWarning) try: term = TestTerminal(kind='unknown', force_styling=True) except UserWarning: err = sys.exc_info()[1] assert err.args[0] == ( "Failed to setupterm(kind='unknown'): " "setupterm: could not find terminal") else: if platform.system().lower() != 'freebsd': assert not term.is_a_tty and not term.does_styling, ( 'Should have thrown exception') warnings.resetwarnings() child() def test_setupterm_invalid_has_no_styling(): "An unknown TERM type does not perform styling." # https://bugzilla.mozilla.org/show_bug.cgi?id=878089 # if TERM is unset, defaults to 'unknown', which should # fail to lookup and emit a warning, only. @as_subprocess def child(): warnings.filterwarnings("ignore", category=UserWarning) term = TestTerminal(kind='xxXunknownXxx', force_styling=True) assert term.kind is None assert term.does_styling is False assert term.number_of_colors == 0 warnings.resetwarnings() child() @pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason='PyPy freezes') def test_missing_ordereddict_uses_module(monkeypatch): "ordereddict module is imported when without collections.OrderedDict." 
import blessed.keyboard if hasattr(collections, 'OrderedDict'): monkeypatch.delattr('collections.OrderedDict') try: imp.reload(blessed.keyboard) except ImportError as err: assert err.args[0] in ("No module named ordereddict", # py2 "No module named 'ordereddict'") # py3 sys.modules['ordereddict'] = mock.Mock() sys.modules['ordereddict'].OrderedDict = -1 imp.reload(blessed.keyboard) assert blessed.keyboard.OrderedDict == -1 del sys.modules['ordereddict'] monkeypatch.undo() imp.reload(blessed.keyboard) else: assert platform.python_version_tuple() < ('2', '7') # reached by py2.6 @pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason='PyPy freezes') def test_python3_2_raises_exception(monkeypatch): "Test python version 3.0 through 3.2 raises an exception." import blessed monkeypatch.setattr('platform.python_version_tuple', lambda: ('3', '2', '2')) try: imp.reload(blessed) except ImportError as err: assert err.args[0] == ( 'Blessed needs Python 3.2.3 or greater for Python 3 ' 'support due to http://bugs.python.org/issue10570.') monkeypatch.undo() imp.reload(blessed) else: assert False, 'Exception should have been raised' def test_IOUnsupportedOperation_dummy(monkeypatch): "Ensure dummy exception is used when io is without UnsupportedOperation." import blessed.terminal import io if hasattr(io, 'UnsupportedOperation'): monkeypatch.delattr('io.UnsupportedOperation') imp.reload(blessed.terminal) assert blessed.terminal.IOUnsupportedOperation.__doc__.startswith( "A dummy exception to take the place of") monkeypatch.undo() imp.reload(blessed.terminal) def test_without_dunder(): "Ensure dunder does not remain in module (py2x InterruptedError test." import blessed.terminal assert '_' not in dir(blessed.terminal) def test_IOUnsupportedOperation(): "Ensure stream that throws IOUnsupportedOperation results in non-tty." @as_subprocess def child(): import blessed.terminal def side_effect(): raise blessed.terminal.IOUnsupportedOperation mock_stream = mock.Mock() mock_stream.fileno = side_effect term = TestTerminal(stream=mock_stream) assert term.stream == mock_stream assert term.does_styling is False assert term.is_a_tty is False assert term.number_of_colors is 0 child() def test_winsize_IOError_returns_environ(): """When _winsize raises IOError, defaults from os.environ given.""" @as_subprocess def child(): def side_effect(fd): raise IOError term = TestTerminal() term._winsize = side_effect os.environ['COLUMNS'] = '1984' os.environ['LINES'] = '1888' assert term._height_and_width() == (1888, 1984, None, None) child() def test_yield_fullscreen(all_terms): "Ensure ``fullscreen()`` writes enter_fullscreen and exit_fullscreen." @as_subprocess def child(kind): t = TestTerminal(stream=StringIO(), force_styling=True) t.enter_fullscreen = u'BEGIN' t.exit_fullscreen = u'END' with t.fullscreen(): pass expected_output = u''.join((t.enter_fullscreen, t.exit_fullscreen)) assert (t.stream.getvalue() == expected_output) child(all_terms) def test_yield_hidden_cursor(all_terms): "Ensure ``hidden_cursor()`` writes hide_cursor and normal_cursor." @as_subprocess def child(kind): t = TestTerminal(stream=StringIO(), force_styling=True) t.hide_cursor = u'BEGIN' t.normal_cursor = u'END' with t.hidden_cursor(): pass expected_output = u''.join((t.hide_cursor, t.normal_cursor)) assert (t.stream.getvalue() == expected_output) child(all_terms) def test_no_preferredencoding_fallback_ascii(): "Ensure empty preferredencoding value defaults to ascii." 
@as_subprocess def child(): with mock.patch('locale.getpreferredencoding') as get_enc: get_enc.return_value = u'' t = TestTerminal() assert t._encoding == 'ascii' child() def test_unknown_preferredencoding_warned_and_fallback_ascii(): "Ensure a locale without a codecs incrementaldecoder emits a warning." @as_subprocess def child(): with mock.patch('locale.getpreferredencoding') as get_enc: with warnings.catch_warnings(record=True) as warned: get_enc.return_value = '---unknown--encoding---' t = TestTerminal() assert t._encoding == 'ascii' assert len(warned) == 1 assert issubclass(warned[-1].category, UserWarning) assert "fallback to ASCII" in str(warned[-1].message) child() def test_win32_missing_tty_modules(monkeypatch): "Ensure dummy exception is used when io is without UnsupportedOperation." @as_subprocess def child(): OLD_STYLE = False try: original_import = getattr(__builtins__, '__import__') OLD_STYLE = True except AttributeError: original_import = __builtins__['__import__'] tty_modules = ('termios', 'fcntl', 'tty') def __import__(name, *args, **kwargs): if name in tty_modules: raise ImportError return original_import(name, *args, **kwargs) for module in tty_modules: sys.modules.pop(module, None) warnings.filterwarnings("error", category=UserWarning) try: if OLD_STYLE: __builtins__.__import__ = __import__ else: __builtins__['__import__'] = __import__ try: import blessed.terminal imp.reload(blessed.terminal) except UserWarning: err = sys.exc_info()[1] assert err.args[0] == blessed.terminal.msg_nosupport warnings.filterwarnings("ignore", category=UserWarning) import blessed.terminal imp.reload(blessed.terminal) assert blessed.terminal.HAS_TTY is False term = blessed.terminal.Terminal('ansi') assert term.height == 24 assert term.width == 80 finally: if OLD_STYLE: setattr(__builtins__, '__import__', original_import) else: __builtins__['__import__'] = original_import warnings.resetwarnings() import blessed.terminal imp.reload(blessed.terminal) child()
gpl-2.0
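A note on the recurring @as_subprocess decorator in the blessed tests above: curses.setupterm() effectively binds a process to one terminal kind, so each assertion runs in a forked child. A simplified, illustrative stand-in for that helper (the real one in .accessories does more careful output capture and error propagation):

import os

def as_subprocess_sketch(func):
    """Run func in a forked child; fail the caller if the child failed."""
    def wrapper(*args):
        pid = os.fork()
        if pid == 0:        # child: run the test body, report via exit code
            try:
                func(*args)
            except Exception:
                os._exit(1)
            os._exit(0)
        _, status = os.waitpid(pid, 0)   # parent: require a clean exit
        assert os.WEXITSTATUS(status) == 0
    return wrapper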
IRI-Research/django
tests/managers_regress/tests.py
62
8228
from __future__ import unicode_literals from django.apps import apps from django.db import models from django.template import Context, Template from django.test import TestCase, override_settings from django.utils.encoding import force_text from .models import ( Child1, Child2, Child3, Child4, Child5, Child6, Child7, AbstractBase1, AbstractBase2, AbstractBase3, RelatedModel, RelationModel, ) class ManagersRegressionTests(TestCase): def test_managers(self): Child1.objects.create(name='fred', data='a1') Child1.objects.create(name='barney', data='a2') Child2.objects.create(name='fred', data='b1', value=1) Child2.objects.create(name='barney', data='b2', value=42) Child3.objects.create(name='fred', data='c1', comment='yes') Child3.objects.create(name='barney', data='c2', comment='no') Child4.objects.create(name='fred', data='d1') Child4.objects.create(name='barney', data='d2') Child5.objects.create(name='fred', comment='yes') Child5.objects.create(name='barney', comment='no') Child6.objects.create(name='fred', data='f1', value=42) Child6.objects.create(name='barney', data='f2', value=42) Child7.objects.create(name='fred') Child7.objects.create(name='barney') self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"]) self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"]) self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"]) self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"]) self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"]) self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"]) self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"]) self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"]) # Since Child6 inherits from Child4, the corresponding rows from f1 and # f2 also appear here. This is the expected result. self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [ "<Child4: d1>", "<Child4: d2>", "<Child4: f1>", "<Child4: f2>" ] ) self.assertQuerysetEqual(Child4.manager1.all(), [ "<Child4: d1>", "<Child4: f1>" ], ordered=False ) self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"]) self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>"]) self.assertQuerysetEqual(Child7._default_manager.order_by('name'), [ "<Child7: barney>", "<Child7: fred>" ] ) def test_abstract_manager(self): # Accessing the manager on an abstract model should # raise an attribute error with an appropriate message. try: AbstractBase3.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: # This error message isn't ideal, but if the model is abstract and # a lot of the class instantiation logic isn't invoked; if the # manager is implied, then we don't get a hook to install the # error-raising manager. self.assertEqual(str(e), "type object 'AbstractBase3' has no attribute 'objects'") def test_custom_abstract_manager(self): # Accessing the manager on an abstract model with an custom # manager should raise an attribute error with an appropriate # message. try: AbstractBase2.restricted.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; AbstractBase2 is abstract") def test_explicit_abstract_manager(self): # Accessing the manager on an abstract model with an explicit # manager should raise an attribute error with an appropriate # message. 
try: AbstractBase1.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; AbstractBase1 is abstract") @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent') def test_swappable_manager(self): # The models need to be removed after the test in order to prevent bad # interactions with the flush operation in other tests. _old_models = apps.app_configs['managers_regress'].models.copy() try: class SwappableModel(models.Model): class Meta: swappable = 'TEST_SWAPPABLE_MODEL' # Accessing the manager on a swappable model should # raise an attribute error with a helpful message try: SwappableModel.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'") finally: apps.app_configs['managers_regress'].models = _old_models apps.all_models['managers_regress'] = _old_models apps.clear_cache() @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent') def test_custom_swappable_manager(self): # The models need to be removed after the test in order to prevent bad # interactions with the flush operation in other tests. _old_models = apps.app_configs['managers_regress'].models.copy() try: class SwappableModel(models.Model): stuff = models.Manager() class Meta: swappable = 'TEST_SWAPPABLE_MODEL' # Accessing the manager on a swappable model with an # explicit manager should raise an attribute error with a # helpful message try: SwappableModel.stuff.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'") finally: apps.app_configs['managers_regress'].models = _old_models apps.all_models['managers_regress'] = _old_models apps.clear_cache() @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent') def test_explicit_swappable_manager(self): # The models need to be removed after the test in order to prevent bad # interactions with the flush operation in other tests. _old_models = apps.app_configs['managers_regress'].models.copy() try: class SwappableModel(models.Model): objects = models.Manager() class Meta: swappable = 'TEST_SWAPPABLE_MODEL' # Accessing the manager on a swappable model with an # explicit manager should raise an attribute error with a # helpful message try: SwappableModel.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'") finally: apps.app_configs['managers_regress'].models = _old_models apps.all_models['managers_regress'] = _old_models apps.clear_cache() def test_regress_3871(self): related = RelatedModel.objects.create() relation = RelationModel() relation.fk = related relation.gfk = related relation.save() relation.m2m.add(related) t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}') self.assertEqual( t.render(Context({'related': related})), ''.join([force_text(relation.pk)] * 3), )
bsd-3-clause
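The models these Django tests import (from .models, not included in this record) attach several managers to one class; Django treats the first manager declared as _default_manager, which is what test_managers probes. A minimal sketch of that setup, with invented model and manager names rather than the actual test models:

from django.db import models

class RestrictedManager(models.Manager):
    def get_queryset(self):
        # Filter everything fetched through this manager.
        return super(RestrictedManager, self).get_queryset().filter(public=True)

class Note(models.Model):
    text = models.CharField(max_length=100)
    public = models.BooleanField(default=False)

    objects = RestrictedManager()    # declared first: becomes _default_manager
    all_objects = models.Manager()   # unfiltered escape hatch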
zhoulingjun/zipline
zipline/utils/security_list.py
18
4472
from datetime import datetime from os import listdir import os.path import pandas as pd import pytz import zipline from zipline.finance.trading import with_environment DATE_FORMAT = "%Y%m%d" zipline_dir = os.path.dirname(zipline.__file__) SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists') class SecurityList(object): def __init__(self, data, current_date_func): """ data: a nested dictionary: knowledge_date -> lookup_date -> {add: [symbol list], 'delete': []}, delete: [symbol list]} current_date_func: function taking no parameters, returning current datetime """ self.data = data self._cache = {} self._knowledge_dates = self.make_knowledge_dates(self.data) self.current_date = current_date_func self.count = 0 self._current_set = set() def make_knowledge_dates(self, data): knowledge_dates = sorted( [pd.Timestamp(k) for k in data.keys()]) return knowledge_dates def __iter__(self): return iter(self.restricted_list) def __contains__(self, item): return item in self.restricted_list @property def restricted_list(self): cd = self.current_date() for kd in self._knowledge_dates: if cd < kd: break if kd in self._cache: self._current_set = self._cache[kd] continue for effective_date, changes in iter(self.data[kd].items()): self.update_current( effective_date, changes['add'], self._current_set.add ) self.update_current( effective_date, changes['delete'], self._current_set.remove ) self._cache[kd] = self._current_set return self._current_set @with_environment() def update_current(self, effective_date, symbols, change_func, env=None): for symbol in symbols: asset = env.asset_finder.lookup_symbol( symbol, as_of_date=effective_date ) # Pass if no Asset exists for the symbol if asset is None: continue change_func(asset.sid) class SecurityListSet(object): # provide a cut point to substitute other security # list implementations. security_list_type = SecurityList def __init__(self, current_date_func): self.current_date_func = current_date_func self._leveraged_etf = None @property def leveraged_etf_list(self): if self._leveraged_etf is None: self._leveraged_etf = self.security_list_type( load_from_directory('leveraged_etf_list'), self.current_date_func ) return self._leveraged_etf def load_from_directory(list_name): """ To resolve the symbol in the LEVERAGED_ETF list, the date on which the symbol was in effect is needed. Furthermore, to maintain a point in time record of our own maintenance of the restricted list, we need a knowledge date. Thus, restricted lists are dictionaries of datetime->symbol lists. new symbols should be entered as a new knowledge date entry. This method assumes a directory structure of: SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt The return value is a dictionary with: knowledge_date -> lookup_date -> {add: [symbol list], 'delete': [symbol list]} """ data = {} dir_path = os.path.join(SECURITY_LISTS_DIR, list_name) for kd_name in listdir(dir_path): kd = datetime.strptime(kd_name, DATE_FORMAT).replace( tzinfo=pytz.utc) data[kd] = {} kd_path = os.path.join(dir_path, kd_name) for ld_name in listdir(kd_path): ld = datetime.strptime(ld_name, DATE_FORMAT).replace( tzinfo=pytz.utc) data[kd][ld] = {} ld_path = os.path.join(kd_path, ld_name) for fname in listdir(ld_path): fpath = os.path.join(ld_path, fname) with open(fpath) as f: symbols = f.read().splitlines() data[kd][ld][fname] = symbols return data
apache-2.0
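To make the nested structure that load_from_directory documents concrete, here is a hand-built example of the dict it returns and that SecurityList replays; the dates and symbols are invented:

from datetime import datetime
import pytz

def utc_date(s):
    return datetime.strptime(s, "%Y%m%d").replace(tzinfo=pytz.utc)

# knowledge_date -> effective (lookup) date -> symbol changes
data = {
    utc_date("20130101"): {
        utc_date("20120401"): {"add": ["AAA", "BBB"], "delete": []},
    },
    utc_date("20140101"): {
        utc_date("20131015"): {"add": [], "delete": ["AAA"]},
    },
}
# SecurityList(data, current_date_func) walks the knowledge dates up to
# "now", applying each add/delete in order to build the restricted set.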
AOSPU/external_chromium_org
build/android/gyp/finalize_apk.py
9
1864
#!/usr/bin/env python # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Signs and zipaligns APK. """ import optparse import shutil import sys import tempfile from util import build_utils def SignApk(key_path, key_name, key_passwd, unsigned_path, signed_path): shutil.copy(unsigned_path, signed_path) sign_cmd = [ 'jarsigner', '-sigalg', 'MD5withRSA', '-digestalg', 'SHA1', '-keystore', key_path, '-storepass', key_passwd, signed_path, key_name, ] build_utils.CheckOutput(sign_cmd) def AlignApk(zipalign_path, unaligned_path, final_path): align_cmd = [ zipalign_path, '-f', '4', # 4 bytes unaligned_path, final_path, ] build_utils.CheckOutput(align_cmd) def main(): parser = optparse.OptionParser() parser.add_option('--zipalign-path', help='Path to the zipalign tool.') parser.add_option('--unsigned-apk-path', help='Path to input unsigned APK.') parser.add_option('--final-apk-path', help='Path to output signed and aligned APK.') parser.add_option('--key-path', help='Path to keystore for signing.') parser.add_option('--key-passwd', help='Keystore password') parser.add_option('--key-name', help='Keystore name') parser.add_option('--stamp', help='Path to touch on success.') options, _ = parser.parse_args() with tempfile.NamedTemporaryFile() as intermediate_file: signed_apk_path = intermediate_file.name SignApk(options.key_path, options.key_name, options.key_passwd, options.unsigned_apk_path, signed_apk_path) AlignApk(options.zipalign_path, signed_apk_path, options.final_apk_path) if options.stamp: build_utils.Touch(options.stamp) if __name__ == '__main__': sys.exit(main())
bsd-3-clause
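A hedged usage sketch for the two helpers in finalize_apk.py, showing their call shapes directly; every path and key name below is a placeholder, not Chromium's actual signing configuration:

# Sign with jarsigner, then 4-byte align with zipalign, as main() does.
SignApk(key_path='debug.keystore',
        key_name='androiddebugkey',
        key_passwd='android',
        unsigned_path='out/app-unsigned.apk',
        signed_path='out/app-signed.apk')
AlignApk(zipalign_path='zipalign',
         unaligned_path='out/app-signed.apk',
         final_path='out/app-final.apk')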
lamby/django-cache-toolbox
cache_toolbox/templatetags/cache_toolbox.py
1
2014
from django import template from django.core.cache import cache from django.template import Node, TemplateSyntaxError, Variable register = template.Library() class CacheNode(Node): def __init__(self, nodelist, expire_time, key): self.nodelist = nodelist self.expire_time = Variable(expire_time) self.key = Variable(key) def render(self, context): key = self.key.resolve(context) expire_time = int(self.expire_time.resolve(context)) value = cache.get(key) if value is None: value = self.nodelist.render(context) cache.set(key, value, expire_time) return value @register.tag def cachedeterministic(parser, token): """ This will cache the contents of a template fragment for a given amount of time, just like {% cache .. %} except that the key is deterministic and not mangled or run through MD5. Usage:: {% cachedeterministic [expire_time] [key] %} .. some expensive processing .. {% endcachedeterministic %} """ nodelist = parser.parse(("endcachedeterministic",)) parser.delete_first_token() tokens = token.contents.split() if len(tokens) != 3: raise TemplateSyntaxError(u"'%r' tag requires 2 arguments." % tokens[0]) return CacheNode(nodelist, tokens[1], tokens[2]) class ShowIfCachedNode(Node): def __init__(self, key): self.key = Variable(key) def render(self, context): key = self.key.resolve(context) return cache.get(key) or "" @register.tag def showifcached(parser, token): """ Show content if it exists in the cache, otherwise display nothing. The key is entirely deterministic and not mangled or run through MD5 (cf. {% cache %}) Usage:: {% showifcached [key] %} """ tokens = token.contents.split() if len(tokens) != 2: raise TemplateSyntaxError(u"'%r' tag requires 1 argument." % tokens[0]) return ShowIfCachedNode(tokens[1])
bsd-3-clause
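A hedged sketch of exercising the cache_toolbox tags above through Django's template engine. It assumes a configured Django project with this tag library installed so the {% load %} name resolves; the cache key string is invented:

from django.template import Context, Template

t = Template(
    "{% load cache_toolbox %}"
    "{% cachedeterministic 300 cache_key %}"
    "expensive fragment"
    "{% endcachedeterministic %}"
)
# The first render computes and caches the fragment under 'sidebar:42';
# later renders with the same key serve it from the cache for 300 seconds.
rendered = t.render(Context({'cache_key': 'sidebar:42'}))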
anant-dev/django
tests/forms_tests/models.py
261
3805
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import itertools import tempfile from django.core.files.storage import FileSystemStorage from django.db import models from django.utils.encoding import python_2_unicode_compatible callable_default_counter = itertools.count() def callable_default(): return next(callable_default_counter) temp_storage = FileSystemStorage(location=tempfile.mkdtemp()) class BoundaryModel(models.Model): positive_integer = models.PositiveIntegerField(null=True, blank=True) class Defaults(models.Model): name = models.CharField(max_length=255, default='class default value') def_date = models.DateField(default=datetime.date(1980, 1, 1)) value = models.IntegerField(default=42) callable_default = models.IntegerField(default=callable_default) class ChoiceModel(models.Model): """For ModelChoiceField and ModelMultipleChoiceField tests.""" CHOICES = [ ('', 'No Preference'), ('f', 'Foo'), ('b', 'Bar'), ] INTEGER_CHOICES = [ (None, 'No Preference'), (1, 'Foo'), (2, 'Bar'), ] STRING_CHOICES_WITH_NONE = [ (None, 'No Preference'), ('f', 'Foo'), ('b', 'Bar'), ] name = models.CharField(max_length=10) choice = models.CharField(max_length=2, blank=True, choices=CHOICES) choice_string_w_none = models.CharField( max_length=2, blank=True, null=True, choices=STRING_CHOICES_WITH_NONE) choice_integer = models.IntegerField(choices=INTEGER_CHOICES, blank=True, null=True) @python_2_unicode_compatible class ChoiceOptionModel(models.Model): """Destination for ChoiceFieldModel's ForeignKey. Can't reuse ChoiceModel because error_message tests require that it have no instances.""" name = models.CharField(max_length=10) class Meta: ordering = ('name',) def __str__(self): return 'ChoiceOption %d' % self.pk def choice_default(): return ChoiceOptionModel.objects.get_or_create(name='default')[0].pk def choice_default_list(): return [choice_default()] def int_default(): return 1 def int_list_default(): return [1] class ChoiceFieldModel(models.Model): """Model with ForeignKey to another model, for testing ModelForm generation with ModelChoiceField.""" choice = models.ForeignKey( ChoiceOptionModel, models.CASCADE, blank=False, default=choice_default, ) choice_int = models.ForeignKey( ChoiceOptionModel, models.CASCADE, blank=False, related_name='choice_int', default=int_default, ) multi_choice = models.ManyToManyField( ChoiceOptionModel, blank=False, related_name='multi_choice', default=choice_default_list, ) multi_choice_int = models.ManyToManyField( ChoiceOptionModel, blank=False, related_name='multi_choice_int', default=int_list_default, ) class OptionalMultiChoiceModel(models.Model): multi_choice = models.ManyToManyField( ChoiceOptionModel, blank=False, related_name='not_relevant', default=choice_default, ) multi_choice_optional = models.ManyToManyField( ChoiceOptionModel, blank=True, related_name='not_relevant2', ) class FileModel(models.Model): file = models.FileField(storage=temp_storage, upload_to='tests') @python_2_unicode_compatible class Group(models.Model): name = models.CharField(max_length=10) def __str__(self): return '%s' % self.name class Cheese(models.Model): name = models.CharField(max_length=100) class Article(models.Model): content = models.TextField()
bsd-3-clause
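One detail worth isolating from the forms test models above: the callable_default/itertools.count pair exists because Django evaluates a callable passed as default at instance-creation time, not once at class definition. A tiny standalone illustration:

import itertools

counter = itertools.count()

def next_value():
    # Passed uncalled as default=next_value, Django would invoke this per
    # new instance, yielding 0, 1, 2, ...
    return next(counter)

assert next_value() == 0
assert next_value() == 1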
joedeller/pymine
helloworld.py
1
3234
#! /usr/bin/python # Joe Deller 2014 # Our first Minecraft program written in the Python language # Level : Beginner # Uses : Libraries # When learning any programming language there is a tradition of writing # your first program to simply say "Hello World!" # The very first line of this program tells the Raspberry Pi we are # running a python program. # The first thing we need to do is tell our program about the # Minecraft Library and to find the Minecraft Manual in that library # Without this, our program won't know anything about Minecraft # Most languages can use libraries as a way of extending the things # they can do. They let us reuse other peoples hard work # so that we don't have to redo it ourselves. # Some libraries contain lots of information, some only a little # On the Raspberry pi, the Minecraft library is fairly small # but has enough that we can do lots of things # The people that write Minecraft wrote a special version for the Raspberry Pi # and a library that lets us do things with it. # import is a Python language keyword. It tells Python to do a very # specific job. In this case to find the Minecraft library import mcpi.minecraft as minecraft # Now we have found our Minecraft instruction manual # we are going to look for the part that tells us how to control minecraft # Then we make something called an object, in this case our object # is a bit like a Smart TV remote control # We also give it a nickname as any easy way of remembering which remote # we mean. In this case, we've called the remote "mc" mc = minecraft.Minecraft.create() # Just as a remote control has lots of buttons that do things # our Minecraft remote control is very similar, except we call the buttons # "methods" # When we want to do something, we press the right button and Minecraft will do something # Much like a smart TV remote searching for a YouTube video, we sometimes type something before # pressing another button on the remote. # The button (method) we are going to press is the postToChat button # This will show us a message in Minecraft # but before we press it, we need to decide what to say # Just like writing a story, we use speech marks to enclose our message # That way the program knows exactly where our message starts and stops # You might have noticed, but most of the program so far has a # # at the start of the line # This tells the computer that the line is a comment for a human to read # and it will ignore it, except in very special cases # Good comments can help other people understand your program # They can help remind you what your program does # Bad comments can help confuse them # Enough already, lets do something! mc.postToChat("Hello World!") # Notice the round brackets () # This tells the program that everything inside them is meant for the postToChat # button # It is a bit like an envelope when you write a letter. # You put your letter inside the envelope and then post it. # In this first program, we only send one piece of information # but as we start to do more complex things, # some buttons need lots of information before they will work # This program only has three lines that actually do anything. # The other 71 are comments like this.
mit
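In the same beginner spirit as the tutorial above, a small follow-on sketch: press another 'button' on the mc remote to place a block beside the player. This continues with the mc object the program already created; block id 57 is the diamond block in the Pi edition.

# Ask Minecraft where the player is standing, in whole-block coordinates
pos = mc.player.getTilePos()
# Put a diamond block one step along the x axis, then announce it in chat
mc.setBlock(pos.x + 1, pos.y, pos.z, 57)
mc.postToChat("I made you a diamond block!")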
devendermishrajio/nova_test_latest
nova/tests/unit/volume/test_cinder.py
43
16898
# Copyright 2013 Mirantis, Inc. # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinderclient import exceptions as cinder_exception import mock from nova import context from nova import exception from nova import test from nova.volume import cinder class FakeCinderClient(object): class Volumes(object): def get(self, volume_id): return {'id': volume_id} def list(self, detailed, search_opts=None): if search_opts is not None and 'id' in search_opts: return [{'id': search_opts['id']}] else: return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None def __init__(self): self.volumes = self.Volumes() self.volume_snapshots = self.volumes class FakeVolume(object): def __init__(self, dict=dict()): self.id = dict.get('id') or '1234' self.status = dict.get('status') or 'available' self.size = dict.get('size') or 1 self.availability_zone = dict.get('availability_zone') or 'cinder' self.created_at = dict.get('created_at') self.attach_time = dict.get('attach_time') self.mountpoint = dict.get('mountpoint') self.display_name = dict.get('display_name') or 'volume-' + self.id self.display_description = dict.get('display_description') or 'fake' self.volume_type_id = dict.get('volume_type_id') self.snapshot_id = dict.get('snapshot_id') self.metadata = dict.get('volume_metadata') or {} class CinderApiTestCase(test.NoDBTestCase): def setUp(self): super(CinderApiTestCase, self).setUp() self.api = cinder.API() self.cinderclient = FakeCinderClient() self.ctx = context.get_admin_context() self.mox.StubOutWithMock(cinder, 'cinderclient') self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view') self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view') self.mox.StubOutWithMock(cinder, 'get_cinder_client_version') def test_get(self): volume_id = 'volume_id1' cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'volume_id1'}) self.mox.ReplayAll() self.api.get(self.ctx, volume_id) def test_get_failed(self): volume_id = 'volume_id' cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound('')) cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest('')) cinder.cinderclient(self.ctx).AndRaise( cinder_exception.ConnectionError('')) self.mox.ReplayAll() self.assertRaises(exception.VolumeNotFound, self.api.get, self.ctx, volume_id) self.assertRaises(exception.InvalidInput, self.api.get, self.ctx, volume_id) self.assertRaises(exception.CinderConnectionFailed, self.api.get, self.ctx, volume_id) def test_create(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() self.api.create(self.ctx, 1, '', '') @mock.patch('nova.volume.cinder.cinderclient') def test_create_failed(self, mock_cinderclient): mock_cinderclient.return_value.volumes.create.side_effect = ( cinder_exception.BadRequest('')) 
self.assertRaises(exception.InvalidInput, self.api.create, self.ctx, 1, '', '') @mock.patch('nova.volume.cinder.cinderclient') def test_create_over_quota_failed(self, mock_cinderclient): mock_cinderclient.return_value.volumes.create.side_effect = ( cinder_exception.OverLimit(413)) self.assertRaises(exception.OverQuota, self.api.create, self.ctx, 1, '', '') mock_cinderclient.return_value.volumes.create.assert_called_once_with( 1, user_id=None, imageRef=None, availability_zone=None, volume_type=None, description='', snapshot_id=None, name='', project_id=None, metadata=None) def test_get_all(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'id1'}).AndReturn('id1') cinder._untranslate_volume_summary_view(self.ctx, {'id': 'id2'}).AndReturn('id2') self.mox.ReplayAll() self.assertEqual(['id1', 'id2'], self.api.get_all(self.ctx)) def test_get_all_with_search(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'id1'}).AndReturn('id1') self.mox.ReplayAll() self.assertEqual(['id1'], self.api.get_all(self.ctx, search_opts={'id': 'id1'})) def test_check_attach_volume_status_error(self): volume = {'id': 'fake', 'status': 'error'} self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_volume_already_attached(self): volume = {'id': 'fake', 'status': 'available'} volume['attach_status'] = "attached" self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_availability_zone_differs(self): volume = {'id': 'fake', 'status': 'available'} volume['attach_status'] = "detached" instance = {'id': 'fake', 'availability_zone': 'zone1', 'host': 'fakehost'} with mock.patch.object(cinder.az, 'get_instance_availability_zone', side_effect=lambda context, instance: 'zone1') as mock_get_instance_az: cinder.CONF.set_override('cross_az_attach', False, group='cinder') volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) mock_get_instance_az.assert_called_once_with(self.ctx, instance) mock_get_instance_az.reset_mock() volume['availability_zone'] = 'zone2' self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) mock_get_instance_az.assert_called_once_with(self.ctx, instance) mock_get_instance_az.reset_mock() del instance['host'] volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach( self.ctx, volume, instance)) self.assertFalse(mock_get_instance_az.called) volume['availability_zone'] = 'zone2' self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) self.assertFalse(mock_get_instance_az.called) cinder.CONF.reset() def test_check_attach(self): volume = {'status': 'available'} volume['attach_status'] = "detached" volume['availability_zone'] = 'zone1' instance = {'availability_zone': 'zone1', 'host': 'fakehost'} cinder.CONF.set_override('cross_az_attach', False, group='cinder') with mock.patch.object(cinder.az, 'get_instance_availability_zone', side_effect=lambda context, instance: 'zone1'): self.assertIsNone(self.api.check_attach( self.ctx, volume, instance)) cinder.CONF.reset() def test_check_detach(self): volume = {'id': 'fake', 'status': 'available'} self.assertRaises(exception.InvalidVolume, self.api.check_detach, self.ctx, volume) volume['status'] = 'non-available' self.assertIsNone(self.api.check_detach(self.ctx, volume)) def 
test_reserve_volume(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'reserve', use_mock_anything=True) self.cinderclient.volumes.reserve('id1') self.mox.ReplayAll() self.api.reserve_volume(self.ctx, 'id1') def test_unreserve_volume(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'unreserve', use_mock_anything=True) self.cinderclient.volumes.unreserve('id1') self.mox.ReplayAll() self.api.unreserve_volume(self.ctx, 'id1') def test_begin_detaching(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'begin_detaching', use_mock_anything=True) self.cinderclient.volumes.begin_detaching('id1') self.mox.ReplayAll() self.api.begin_detaching(self.ctx, 'id1') def test_roll_detaching(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'roll_detaching', use_mock_anything=True) self.cinderclient.volumes.roll_detaching('id1') self.mox.ReplayAll() self.api.roll_detaching(self.ctx, 'id1') @mock.patch('nova.volume.cinder.cinderclient') def test_attach(self, mock_cinderclient): mock_volumes = mock.MagicMock() mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) self.api.attach(self.ctx, 'id1', 'uuid', 'point') mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point', mode='rw') @mock.patch('nova.volume.cinder.cinderclient') def test_attach_with_mode(self, mock_cinderclient): mock_volumes = mock.MagicMock() mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro') mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point', mode='ro') def test_detach(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'detach', use_mock_anything=True) self.cinderclient.volumes.detach('id1') self.mox.ReplayAll() self.api.detach(self.ctx, 'id1') def test_initialize_connection(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'initialize_connection', use_mock_anything=True) self.cinderclient.volumes.initialize_connection('id1', 'connector') self.mox.ReplayAll() self.api.initialize_connection(self.ctx, 'id1', 'connector') def test_terminate_connection(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'terminate_connection', use_mock_anything=True) self.cinderclient.volumes.terminate_connection('id1', 'connector') self.mox.ReplayAll() self.api.terminate_connection(self.ctx, 'id1', 'connector') def test_delete(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'delete', use_mock_anything=True) self.cinderclient.volumes.delete('id1') self.mox.ReplayAll() self.api.delete(self.ctx, 'id1') def test_update(self): self.assertRaises(NotImplementedError, self.api.update, self.ctx, '', '') def test_get_snapshot(self): snapshot_id = 'snapshot_id' cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': snapshot_id}) self.mox.ReplayAll() self.api.get_snapshot(self.ctx, snapshot_id) def test_get_snapshot_failed(self): snapshot_id = 'snapshot_id' 
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound('')) cinder.cinderclient(self.ctx).AndRaise( cinder_exception.ConnectionError('')) self.mox.ReplayAll() self.assertRaises(exception.SnapshotNotFound, self.api.get_snapshot, self.ctx, snapshot_id) self.assertRaises(exception.CinderConnectionFailed, self.api.get_snapshot, self.ctx, snapshot_id) def test_get_all_snapshots(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'id1'}).AndReturn('id1') cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'id2'}).AndReturn('id2') self.mox.ReplayAll() self.assertEqual(['id1', 'id2'], self.api.get_all_snapshots(self.ctx)) def test_create_snapshot(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '') def test_create_force(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '') def test_delete_snapshot(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volume_snapshots, 'delete', use_mock_anything=True) self.cinderclient.volume_snapshots.delete('id1') self.mox.ReplayAll() self.api.delete_snapshot(self.ctx, 'id1') def test_update_snapshot_status(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volume_snapshots, 'update_snapshot_status', use_mock_anything=True) self.cinderclient.volume_snapshots.update_snapshot_status( 'id1', {'status': 'error', 'progress': '90%'}) self.mox.ReplayAll() self.api.update_snapshot_status(self.ctx, 'id1', 'error') def test_get_volume_encryption_metadata(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'get_encryption_metadata', use_mock_anything=True) self.cinderclient.volumes.\ get_encryption_metadata({'encryption_key_id': 'fake_key'}) self.mox.ReplayAll() self.api.get_volume_encryption_metadata(self.ctx, {'encryption_key_id': 'fake_key'})
apache-2.0
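The mock-based tests above share one shape: patch the module-level cinderclient factory, keep a handle on the MagicMock standing in for .volumes, then assert on the recorded calls. The same skeleton isolated with a stand-in function, since the pattern itself needs nothing from nova:

import mock

def attach(get_client, ctx, volume_id):
    # Stand-in for the code under test: build a client, then use it.
    get_client(ctx).volumes.attach(volume_id, mode='rw')

mock_volumes = mock.MagicMock()
mock_client = mock.MagicMock(return_value=mock.MagicMock(volumes=mock_volumes))

attach(mock_client, 'ctx', 'id1')
mock_client.assert_called_once_with('ctx')
mock_volumes.attach.assert_called_once_with('id1', mode='rw')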
mnuthan1/workflow
lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshistream.py
1730
2278
from __future__ import absolute_import, division, unicode_literals from genshi.core import QName from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT from . import _base from ..constants import voidElements, namespaces class TreeWalker(_base.TreeWalker): def __iter__(self): # Buffer the events so we can pass in the following one previous = None for event in self.tree: if previous is not None: for token in self.tokens(previous, event): yield token previous = event # Don't forget the final event! if previous is not None: for token in self.tokens(previous, None): yield token def tokens(self, event, next): kind, data, pos = event if kind == START: tag, attribs = data name = tag.localname namespace = tag.namespace converted_attribs = {} for k, v in attribs: if isinstance(k, QName): converted_attribs[(k.namespace, k.localname)] = v else: converted_attribs[(None, k)] = v if namespace == namespaces["html"] and name in voidElements: for token in self.emptyTag(namespace, name, converted_attribs, not next or next[0] != END or next[1] != tag): yield token else: yield self.startTag(namespace, name, converted_attribs) elif kind == END: name = data.localname namespace = data.namespace if name not in voidElements: yield self.endTag(namespace, name) elif kind == COMMENT: yield self.comment(data) elif kind == TEXT: for token in self.text(data): yield token elif kind == DOCTYPE: yield self.doctype(*data) elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, START_CDATA, END_CDATA, PI): pass else: yield self.unknown(kind)
apache-2.0
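The event buffering in TreeWalker.__iter__ above is a one-item lookahead: each event is yielded together with its successor so tokens() can spot an element that closes immediately and emit an empty tag. The same pattern, isolated:

def with_next(events):
    previous = None
    for event in events:
        if previous is not None:
            yield previous, event
        previous = event
    if previous is not None:
        yield previous, None   # the final event pairs with None

assert list(with_next([1, 2, 3])) == [(1, 2), (2, 3), (3, None)]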
ktsamis/repose
repose/command/install.py
2
1838
import concurrent.futures from . import Command from itertools import chain import logging from ..utils import blue logger = logging.getLogger("repose.command.install") class Install(Command): command = True def _run(self, repoq, target): repositories = {} for repa in self.repa: try: repositories.update( repoq.solve_repa(repa, self.targets[target].products.get_base()) ) except ValueError as error: logger.error(error) for repo in chain.from_iterable(x for x in (y for y in repositories.values())): addcmd = self.addcmd.format( name=repo.name, url=repo.url, params="-cfkn" if repo.refresh else "-ckn" ) if self.dryrun: print(blue("{}".format(target)) + " - {}".format(addcmd)) else: self.targets[target].run(addcmd) self._report_target(target) self.targets[target].run(self.refcmd) if repositories.keys(): inscmd = self.ipdcmd.format(products=" ".join(repositories.keys())) if self.dryrun: print(blue(str(target)) + " - {}".format(inscmd)) else: self.targets[target].run(inscmd) self._report_target(target) else: logger.error("No products to install") def run(self): repoq = self._init_repoq() self.targets.read_products() self.targets.read_repos() with concurrent.futures.ThreadPoolExecutor() as executor: targets = [ executor.submit(self._run, repoq, target) for target in self.targets.keys() ] concurrent.futures.wait(targets) self.targets.close()
gpl-3.0
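The run() method above uses the standard executor fan-out: submit one future per target, then block until all finish. An isolated sketch of that pattern:

import concurrent.futures

def check(host):
    return host.upper()   # stand-in for Install._run's per-target work

with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(check, h) for h in ("host-a", "host-b")]
    concurrent.futures.wait(futures)
    results = [f.result() for f in futures]   # re-raises any worker error

assert results == ["HOST-A", "HOST-B"]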